From 9a5f417d06c84dc53d57b68d788d83ec5055edcd Mon Sep 17 00:00:00 2001 From: bentrevett Date: Mon, 14 Sep 2020 22:31:16 +0100 Subject: [PATCH] added nbow with embeddingbag example --- experimental/1_nbow.ipynb | 4 +- experimental/a_nbow-bag.ipynb | 950 ++++++++++++++++++++++++++++++++++ 2 files changed, 952 insertions(+), 2 deletions(-) create mode 100644 experimental/a_nbow-bag.ipynb diff --git a/experimental/1_nbow.ipynb b/experimental/1_nbow.ipynb index e7130a8..80388c3 100644 --- a/experimental/1_nbow.ipynb +++ b/experimental/1_nbow.ipynb @@ -78,7 +78,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n" + "\n" ] } ], @@ -1306,7 +1306,7 @@ "Epoch: 03 | Epoch Time: 0m 4s\n", "\tTrain Loss: 0.629 | Train Acc: 72.45%\n", "\t Val. Loss: 0.611 | Val. Acc: 73.54%\n", - "Epoch: 04 | Epoch Time: 0m 5s\n", + "Epoch: 04 | Epoch Time: 0m 4s\n", "\tTrain Loss: 0.583 | Train Acc: 76.17%\n", "\t Val. Loss: 0.566 | Val. Acc: 77.00%\n", "Epoch: 05 | Epoch Time: 0m 4s\n", diff --git a/experimental/a_nbow-bag.ipynb b/experimental/a_nbow-bag.ipynb new file mode 100644 index 0000000..097aec4 --- /dev/null +++ b/experimental/a_nbow-bag.ipynb @@ -0,0 +1,950 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 228 + }, + "colab_type": "code", + "id": "-V90fMxJdFl7", + "outputId": "2bbc3f28-84e3-47bd-97a2-ea0c2f0cf395" + }, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "\n", + "import torchtext\n", + "import torchtext.experimental\n", + "import torchtext.experimental.vectors\n", + "from torchtext.experimental.datasets.raw.text_classification import RawTextIterableDataset\n", + "from torchtext.experimental.datasets.text_classification import TextClassificationDataset\n", + "from torchtext.experimental.functional import sequential_transforms, vocab_func, totensor\n", + "\n", + "import collections\n", + "import random\n", + "import time" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tOO7b-Z1dFmA" + }, + "outputs": [], + "source": [ + "seed = 1234\n", + "\n", + "torch.manual_seed(seed)\n", + "random.seed(seed)\n", + "torch.backends.cudnn.deterministic = True\n", + "torch.backends.cudnn.benchmark = False" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FhBXG95YdFmD" + }, + "outputs": [], + "source": [ + "raw_train_data, raw_test_data = torchtext.experimental.datasets.raw.IMDB()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "rOTczrIEdFmY" + }, + "outputs": [], + "source": [ + "def get_train_valid_split(raw_train_data, split_ratio = 0.7):\n", + "\n", + " raw_train_data = list(raw_train_data)\n", + " \n", + " random.shuffle(raw_train_data)\n", + " \n", + " n_train_examples = int(len(raw_train_data) * split_ratio)\n", + " \n", + " train_data = raw_train_data[:n_train_examples]\n", + " valid_data = raw_train_data[n_train_examples:]\n", + " \n", + " train_data = RawTextIterableDataset(train_data)\n", + " valid_data = RawTextIterableDataset(valid_data)\n", + " \n", + " return train_data, valid_data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "C6Tp4CyQdFma" + }, + "outputs": [], + "source": [ + "raw_train_data, raw_valid_data = 
get_train_valid_split(raw_train_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "LTJjCocRdFmh" + }, + "outputs": [], + "source": [ + "class Tokenizer:\n", + " def __init__(self, tokenize_fn = 'basic_english', lower = True, max_length = None):\n", + " \n", + " self.tokenize_fn = torchtext.data.utils.get_tokenizer(tokenize_fn)\n", + " self.lower = lower\n", + " self.max_length = max_length\n", + " \n", + " def tokenize(self, s):\n", + " \n", + " tokens = self.tokenize_fn(s)\n", + " \n", + " if self.lower:\n", + " tokens = [token.lower() for token in tokens]\n", + " \n", + " if self.max_length is not None:\n", + " tokens = tokens[:self.max_length]\n", + " \n", + " return tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "5P2KumuDdFmj" + }, + "outputs": [], + "source": [ + "max_length = 500\n", + "\n", + "tokenizer = Tokenizer(max_length = max_length)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "V1albCvadFmm", + "outputId": "5c7c30f2-c6b7-4098-990d-7bfcdc2446f1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['this', 'film', 'is', 'terrible', '.', 'i', 'hate', 'it', 'and', 'it', \"'\", 's', 'bad', '!']\n" + ] + } + ], + "source": [ + "s = \"this film is terrible. i hate it and it's bad!\"\n", + "\n", + "print(tokenizer.tokenize(s))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "anC7_ViodFmp" + }, + "outputs": [], + "source": [ + "def build_vocab_from_data(raw_data, tokenizer, **vocab_kwargs):\n", + " \n", + " token_freqs = collections.Counter()\n", + " \n", + " for label, text in raw_data:\n", + " tokens = tokenizer.tokenize(text)\n", + " token_freqs.update(tokens)\n", + " \n", + " vocab = torchtext.vocab.Vocab(token_freqs, **vocab_kwargs)\n", + " \n", + " return vocab" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "rgHPS1xzdFmt" + }, + "outputs": [], + "source": [ + "max_size = 25_000\n", + "\n", + "vocab = build_vocab_from_data(raw_train_data, tokenizer, max_size = max_size)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "xiW0GItTdFm6" + }, + "outputs": [], + "source": [ + "def process_raw_data(raw_data, tokenizer, vocab):\n", + " \n", + " raw_data = [(label, text) for (label, text) in raw_data]\n", + "\n", + " text_transform = sequential_transforms(tokenizer.tokenize,\n", + " vocab_func(vocab),\n", + " totensor(dtype=torch.long))\n", + " \n", + " label_transform = sequential_transforms(totensor(dtype=torch.long))\n", + "\n", + " transforms = (label_transform, text_transform)\n", + "\n", + " dataset = TextClassificationDataset(raw_data,\n", + " vocab,\n", + " transforms)\n", + " \n", + " return dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4Rec_Wk6dFnD" + }, + "outputs": [], + "source": [ + "train_data = process_raw_data(raw_train_data, tokenizer, vocab)\n", + "valid_data = process_raw_data(raw_valid_data, tokenizer, vocab)\n", + "test_data = process_raw_data(raw_test_data, tokenizer, vocab)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 13, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "egzlLweTdFnH" + }, + "outputs": [], + "source": [ + "class Collator: \n", + " def collate(self, batch):\n", + " \n", + " labels, text = zip(*batch)\n", + " \n", + " labels = torch.LongTensor(labels)\n", + " \n", + " lengths = [len(x) for x in text]\n", + " lengths = torch.LongTensor([0] + lengths[:-1])\n", + " \n", + " offsets = torch.cumsum(lengths, dim = 0)\n", + " \n", + " text = torch.cat(text)\n", + " \n", + " return labels, text, offsets" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "TYLvjhoSdFnM" + }, + "outputs": [], + "source": [ + "collator = Collator()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "7Ly4l1I8dFnR" + }, + "outputs": [], + "source": [ + "batch_size = 256\n", + "\n", + "train_iterator = torch.utils.data.DataLoader(train_data, \n", + " batch_size, \n", + " shuffle = True, \n", + " collate_fn = collator.collate)\n", + "\n", + "valid_iterator = torch.utils.data.DataLoader(valid_data, \n", + " batch_size, \n", + " shuffle = False, \n", + " collate_fn = collator.collate)\n", + "\n", + "test_iterator = torch.utils.data.DataLoader(test_data, \n", + " batch_size, \n", + " shuffle = False, \n", + " collate_fn = collator.collate)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "dbh38jHEdFnV" + }, + "outputs": [], + "source": [ + "class NBOW(nn.Module):\n", + " def __init__(self, input_dim, emb_dim, output_dim):\n", + " super().__init__()\n", + " \n", + " self.embedding = nn.EmbeddingBag(input_dim, emb_dim)\n", + " self.fc = nn.Linear(emb_dim, output_dim)\n", + " \n", + " def forward(self, text, offsets):\n", + " \n", + " # text = [seq len * batch size]\n", + " # offsets = [batch size]\n", + " \n", + " embedded = self.embedding(text, offsets)\n", + " \n", + " # embedded = [batch size, emb dim]\n", + " \n", + " prediction = self.fc(embedded)\n", + " \n", + " # prediction = [batch size, output dim]\n", + " \n", + " return prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Ga1nXhindFnZ" + }, + "outputs": [], + "source": [ + "input_dim = len(vocab)\n", + "emb_dim = 100\n", + "output_dim = 2\n", + "\n", + "model = NBOW(input_dim, emb_dim, output_dim)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "UyIJC0tYdFnc" + }, + "outputs": [], + "source": [ + "def count_parameters(model):\n", + " return sum(p.numel() for p in model.parameters() if p.requires_grad)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "1sJRLyewdFng", + "outputId": "e7e357e1-1cc7-4aa4-ff40-4d749209759d" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The model has 2,500,402 trainable parameters\n" + ] + } + ], + "source": [ + "print(f'The model has {count_parameters(model):,} trainable parameters')" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "BPsihrZudFnl" + }, + "outputs": [], + "source": [ + "glove = torchtext.experimental.vectors.GloVe(name = '6B',\n", + " dim = emb_dim)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4BFftRDMdFnx" + }, + "outputs": [], + "source": [ + "def get_pretrained_embedding(initial_embedding, pretrained_vectors, vocab, unk_token):\n", + " \n", + " pretrained_embedding = torch.FloatTensor(initial_embedding.weight.clone()).detach() \n", + " pretrained_vocab = pretrained_vectors.vectors.get_stoi()\n", + " \n", + " unk_tokens = []\n", + " \n", + " for idx, token in enumerate(vocab.itos):\n", + " if token in pretrained_vocab:\n", + " pretrained_vector = pretrained_vectors[token]\n", + " pretrained_embedding[idx] = pretrained_vector\n", + " else:\n", + " unk_tokens.append(token)\n", + " \n", + " return pretrained_embedding, unk_tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "QRToW07JdFnz" + }, + "outputs": [], + "source": [ + "unk_token = ''\n", + "\n", + "pretrained_embedding, unk_tokens = get_pretrained_embedding(model.embedding, glove, vocab, unk_token)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 139 + }, + "colab_type": "code", + "id": "AnE6D4MAdFn_", + "outputId": "8b3fea1a-9bcb-4fd9-ba78-72baee94f96a" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[-0.1117, -0.4966, 0.1631, ..., 1.5903, -0.1947, -0.2415],\n", + " [ 1.3204, 1.5997, -1.0792, ..., 0.6060, 0.2209, -0.8245],\n", + " [-0.0382, -0.2449, 0.7281, ..., -0.1459, 0.8278, 0.2706],\n", + " ...,\n", + " [-0.2925, 0.1087, 0.7920, ..., -0.3641, 0.1822, -0.4104],\n", + " [-0.7250, 0.7545, 0.1637, ..., -0.0144, -0.1761, 0.3418],\n", + " [ 1.1753, 0.0460, -0.3542, ..., 0.4510, 0.0485, -0.4015]])" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.embedding.weight.data.copy_(pretrained_embedding)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DJloauERdFoF" + }, + "outputs": [], + "source": [ + "optimizer = optim.Adam(model.parameters())" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "fPPZ0cs_dFoH" + }, + "outputs": [], + "source": [ + "criterion = nn.CrossEntropyLoss()" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "HGUcFIupdFoK", + "outputId": "e5d9b842-689b-49ca-a4f4-08574f0524ee" + }, + "outputs": [], + "source": [ + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Ynf7j6kQdFoM" + }, + "outputs": [], + "source": [ + "model = model.to(device)\n", + "criterion = criterion.to(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "977iykeOdFoP" + }, + "outputs": [], + "source": [ + "def calculate_accuracy(predictions, labels):\n", + " top_predictions = predictions.argmax(1, keepdim = True)\n", + " correct = top_predictions.eq(labels.view_as(top_predictions)).sum()\n", + " accuracy = correct.float() / labels.shape[0]\n", + " return accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "colab": {}, + "colab_type": 
"code", + "id": "HPNI8DJudFoS" + }, + "outputs": [], + "source": [ + "def train(model, iterator, optimizer, criterion, device):\n", + " \n", + " epoch_loss = 0\n", + " epoch_acc = 0\n", + " \n", + " model.train()\n", + " \n", + " for labels, text, offsets in iterator:\n", + " \n", + " labels = labels.to(device)\n", + " text = text.to(device)\n", + " offsets = offsets.to(device)\n", + " \n", + " optimizer.zero_grad()\n", + " \n", + " predictions = model(text, offsets)\n", + " \n", + " loss = criterion(predictions, labels)\n", + " \n", + " acc = calculate_accuracy(predictions, labels)\n", + " \n", + " loss.backward()\n", + " \n", + " optimizer.step()\n", + " \n", + " epoch_loss += loss.item()\n", + " epoch_acc += acc.item()\n", + " \n", + " return epoch_loss / len(iterator), epoch_acc / len(iterator)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "kp6pV5xKdFoV" + }, + "outputs": [], + "source": [ + "def evaluate(model, iterator, criterion, device):\n", + " \n", + " epoch_loss = 0\n", + " epoch_acc = 0\n", + " \n", + " model.eval()\n", + " \n", + " with torch.no_grad():\n", + " \n", + " for labels, text, offsets in iterator:\n", + "\n", + " labels = labels.to(device)\n", + " text = text.to(device)\n", + " offsets = offsets.to(device)\n", + " \n", + " predictions = model(text, offsets)\n", + " \n", + " loss = criterion(predictions, labels)\n", + " \n", + " acc = calculate_accuracy(predictions, labels)\n", + "\n", + " epoch_loss += loss.item()\n", + " epoch_acc += acc.item()\n", + " \n", + " return epoch_loss / len(iterator), epoch_acc / len(iterator)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "8YzL45gYdFoX" + }, + "outputs": [], + "source": [ + "def epoch_time(start_time, end_time):\n", + " elapsed_time = end_time - start_time\n", + " elapsed_mins = int(elapsed_time / 60)\n", + " elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n", + " return elapsed_mins, elapsed_secs" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 537 + }, + "colab_type": "code", + "id": "0A8wv7-xdFoa", + "outputId": "238f01bf-5438-482a-80ac-75c70cb20ed1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch: 01 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.678 | Train Acc: 63.94%\n", + "\t Val. Loss: 0.659 | Val. Acc: 70.86%\n", + "Epoch: 02 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.634 | Train Acc: 72.62%\n", + "\t Val. Loss: 0.608 | Val. Acc: 74.09%\n", + "Epoch: 03 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.570 | Train Acc: 77.51%\n", + "\t Val. Loss: 0.542 | Val. Acc: 78.16%\n", + "Epoch: 04 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.497 | Train Acc: 81.81%\n", + "\t Val. Loss: 0.477 | Val. Acc: 81.68%\n", + "Epoch: 05 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.430 | Train Acc: 84.98%\n", + "\t Val. Loss: 0.424 | Val. Acc: 84.21%\n", + "Epoch: 06 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.375 | Train Acc: 87.18%\n", + "\t Val. Loss: 0.387 | Val. Acc: 85.68%\n", + "Epoch: 07 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.334 | Train Acc: 88.58%\n", + "\t Val. Loss: 0.357 | Val. Acc: 86.61%\n", + "Epoch: 08 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.302 | Train Acc: 89.62%\n", + "\t Val. Loss: 0.337 | Val. Acc: 87.14%\n", + "Epoch: 09 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.274 | Train Acc: 90.65%\n", + "\t Val. Loss: 0.321 | Val. 
Acc: 87.67%\n", + "Epoch: 10 | Epoch Time: 0m 4s\n", + "\tTrain Loss: 0.253 | Train Acc: 91.37%\n", + "\t Val. Loss: 0.308 | Val. Acc: 88.14%\n" + ] + } + ], + "source": [ + "n_epochs = 10\n", + "\n", + "best_valid_loss = float('inf')\n", + "\n", + "for epoch in range(n_epochs):\n", + "\n", + " start_time = time.monotonic()\n", + " \n", + " train_loss, train_acc = train(model, train_iterator, optimizer, criterion, device)\n", + " valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, device)\n", + " \n", + " end_time = time.monotonic()\n", + "\n", + " epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n", + " \n", + " if valid_loss < best_valid_loss:\n", + " best_valid_loss = valid_loss\n", + " torch.save(model.state_dict(), 'nbow-model.pt')\n", + " \n", + " print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n", + " print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n", + " print(f'\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "oMHAuMFNdFoc", + "outputId": "58b32f9a-8c39-4818-b526-1a80e435f3ae" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Test Loss: 0.327 | Test Acc: 86.80%\n" + ] + } + ], + "source": [ + "model.load_state_dict(torch.load('nbow-model.pt'))\n", + "\n", + "test_loss, test_acc = evaluate(model, test_iterator, criterion, device)\n", + "\n", + "print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "sEDiSM3fdFog" + }, + "outputs": [], + "source": [ + "def predict_sentiment(tokenizer, vocab, model, device, sentence):\n", + " model.eval()\n", + " tokens = tokenizer.tokenize(sentence)\n", + " indexes = [vocab.stoi[token] for token in tokens]\n", + " tensor = torch.LongTensor(indexes).to(device)\n", + " offset = torch.LongTensor([0]).to(device)\n", + " prediction = model(tensor, offset)\n", + " probabilities = nn.functional.softmax(prediction, dim = -1)\n", + " pos_probability = probabilities.squeeze()[-1].item()\n", + " return pos_probability" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "ycEAWhYIdFoi", + "outputId": "8a675641-fd79-46a6-b4e6-0b2006f866cc" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0.00038787935045547783" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sentence = 'the absolute worst movie of all time.'\n", + "\n", + "predict_sentiment(tokenizer, vocab, model, device, sentence)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "cuMFqIoJdFok", + "outputId": "12c964fc-6788-459c-ad5e-ca0af366b1d4" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0.9986314177513123" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sentence = 'one of the greatest films i have ever seen in my life.'\n", + "\n", + "predict_sentiment(tokenizer, vocab, model, device, sentence)" + ] + }, + { + "cell_type": "code", + "execution_count": 37, 
+ "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "zausUPENdFoo", + "outputId": "2bdd06df-dab7-47ea-8952-8bd82d39bac2" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0.6374390721321106" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sentence = \"i thought it was going to be one of the greatest films i have ever seen in my life, \\\n", + "but it was actually the absolute worst movie of all time.\"\n", + "\n", + "predict_sentiment(tokenizer, vocab, model, device, sentence)" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "colab_type": "code", + "id": "e15vpNJYdFor", + "outputId": "eed3ae38-d01a-4476-a235-8fd3582240f3" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0.6374390721321106" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sentence = \"i thought it was going to be the absolute worst movie of all time, \\\n", + "but it was actually one of the greatest films i have ever seen in my life.\"\n", + "\n", + "predict_sentiment(tokenizer, vocab, model, device, sentence)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "machine_shape": "hm", + "name": "1_nbow.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +}