diff --git a/Makefile b/Makefile index 0a36670dd6c..19c6d597680 100644 --- a/Makefile +++ b/Makefile @@ -82,10 +82,6 @@ download: wget -nv -N http://dl.fbaipublicfiles.com/pythia/data/vocab.tar.gz -P $(DATADIR) tar $(TAROPTS) -xzf $(DATADIR)/vocab.tar.gz -C ./beginner_source/data/ - # Download dataset for beginner_source/torchtext_custom_dataset_tutorial.py - wget -nv -N https://www.manythings.org/anki/deu-eng.zip -P $(DATADIR) - unzip -o $(DATADIR)/deu-eng.zip -d beginner_source/data/ - # Download PennFudanPed dataset for intermediate_source/torchvision_tutorial.py wget https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip -P $(DATADIR) unzip -o $(DATADIR)/PennFudanPed.zip -d intermediate_source/data/ diff --git a/beginner_source/text_sentiment_ngrams_tutorial.py b/beginner_source/text_sentiment_ngrams_tutorial.py deleted file mode 100644 index 9cc5d6c8671..00000000000 --- a/beginner_source/text_sentiment_ngrams_tutorial.py +++ /dev/null @@ -1,372 +0,0 @@ -""" -Text classification with the torchtext library -============================================== - -In this tutorial, we will show how to use the torchtext library to build the dataset for the text classification analysis. Users will have the flexibility to - - - Access to the raw data as an iterator - - Build data processing pipeline to convert the raw text strings into ``torch.Tensor`` that can be used to train the model - - Shuffle and iterate the data with `torch.utils.data.DataLoader `__ - - -Prerequisites -~~~~~~~~~~~~~~~~ - -A recent 2.x version of the ``portalocker`` package needs to be installed prior to running the tutorial. -For example, in the Colab environment, this can be done by adding the following line at the top of the script: - -.. code-block:: bash - - !pip install -U portalocker>=2.0.0` - -""" - - -###################################################################### -# Access to the raw dataset iterators -# ----------------------------------- -# -# The torchtext library provides a few raw dataset iterators, which yield the raw text strings. For example, the ``AG_NEWS`` dataset iterators yield the raw data as a tuple of label and text. -# -# To access torchtext datasets, please install torchdata following instructions at https://github.com/pytorch/data. -# - -import torch -from torchtext.datasets import AG_NEWS - -train_iter = iter(AG_NEWS(split="train")) - -###################################################################### -# .. code-block:: sh -# -# next(train_iter) -# >>> (3, "Fears for T N pension after talks Unions representing workers at Turner -# Newall say they are 'disappointed' after talks with stricken parent firm Federal -# Mogul.") -# -# next(train_iter) -# >>> (4, "The Race is On: Second Private Team Sets Launch Date for Human -# Spaceflight (SPACE.com) SPACE.com - TORONTO, Canada -- A second\\team of -# rocketeers competing for the #36;10 million Ansari X Prize, a contest -# for\\privately funded suborbital space flight, has officially announced -# the first\\launch date for its manned rocket.") -# -# next(train_iter) -# >>> (4, 'Ky. 
Company Wins Grant to Study Peptides (AP) AP - A company founded -# by a chemistry researcher at the University of Louisville won a grant to develop -# a method of producing better peptides, which are short chains of amino acids, the -# building blocks of proteins.') -# - - -###################################################################### -# Prepare data processing pipelines -# --------------------------------- -# -# We have revisited the very basic components of the torchtext library, including vocab, word vectors, tokenizer. Those are the basic data processing building blocks for raw text string. -# -# Here is an example for typical NLP data processing with tokenizer and vocabulary. The first step is to build a vocabulary with the raw training dataset. Here we use built in -# factory function `build_vocab_from_iterator` which accepts iterator that yield list or iterator of tokens. Users can also pass any special symbols to be added to the -# vocabulary. - - -from torchtext.data.utils import get_tokenizer -from torchtext.vocab import build_vocab_from_iterator - -tokenizer = get_tokenizer("basic_english") -train_iter = AG_NEWS(split="train") - - -def yield_tokens(data_iter): - for _, text in data_iter: - yield tokenizer(text) - - -vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=[""]) -vocab.set_default_index(vocab[""]) - -###################################################################### -# The vocabulary block converts a list of tokens into integers. -# -# .. code-block:: sh -# -# vocab(['here', 'is', 'an', 'example']) -# >>> [475, 21, 30, 5297] -# -# Prepare the text processing pipeline with the tokenizer and vocabulary. The text and label pipelines will be used to process the raw data strings from the dataset iterators. - -text_pipeline = lambda x: vocab(tokenizer(x)) -label_pipeline = lambda x: int(x) - 1 - - -###################################################################### -# The text pipeline converts a text string into a list of integers based on the lookup table defined in the vocabulary. The label pipeline converts the label into integers. For example, -# -# .. code-block:: sh -# -# text_pipeline('here is the an example') -# >>> [475, 21, 2, 30, 5297] -# label_pipeline('10') -# >>> 9 -# - - -###################################################################### -# Generate data batch and iterator -# -------------------------------- -# -# `torch.utils.data.DataLoader `__ -# is recommended for PyTorch users (a tutorial is `here `__). -# It works with a map-style dataset that implements the ``getitem()`` and ``len()`` protocols, and represents a map from indices/keys to data samples. It also works with an iterable dataset with the shuffle argument of ``False``. -# -# Before sending to the model, ``collate_fn`` function works on a batch of samples generated from ``DataLoader``. The input to ``collate_fn`` is a batch of data with the batch size in ``DataLoader``, and ``collate_fn`` processes them according to the data processing pipelines declared previously. Pay attention here and make sure that ``collate_fn`` is declared as a top level def. This ensures that the function is available in each worker. -# -# In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of ``nn.EmbeddingBag``. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of individual text entries. 
- - -from torch.utils.data import DataLoader - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -def collate_batch(batch): - label_list, text_list, offsets = [], [], [0] - for _label, _text in batch: - label_list.append(label_pipeline(_label)) - processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64) - text_list.append(processed_text) - offsets.append(processed_text.size(0)) - label_list = torch.tensor(label_list, dtype=torch.int64) - offsets = torch.tensor(offsets[:-1]).cumsum(dim=0) - text_list = torch.cat(text_list) - return label_list.to(device), text_list.to(device), offsets.to(device) - - -train_iter = AG_NEWS(split="train") -dataloader = DataLoader( - train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch -) - - -###################################################################### -# Define the model -# ---------------- -# -# The model is composed of the `nn.EmbeddingBag `__ layer plus a linear layer for the classification purpose. ``nn.EmbeddingBag`` with the default mode of "mean" computes the mean value of a “bag” of embeddings. Although the text entries here have different lengths, ``nn.EmbeddingBag`` module requires no padding here since the text lengths are saved in offsets. -# -# Additionally, since ``nn.EmbeddingBag`` accumulates the average across -# the embeddings on the fly, ``nn.EmbeddingBag`` can enhance the -# performance and memory efficiency to process a sequence of tensors. -# -# .. image:: ../_static/img/text_sentiment_ngrams_model.png -# - -from torch import nn - - -class TextClassificationModel(nn.Module): - def __init__(self, vocab_size, embed_dim, num_class): - super(TextClassificationModel, self).__init__() - self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False) - self.fc = nn.Linear(embed_dim, num_class) - self.init_weights() - - def init_weights(self): - initrange = 0.5 - self.embedding.weight.data.uniform_(-initrange, initrange) - self.fc.weight.data.uniform_(-initrange, initrange) - self.fc.bias.data.zero_() - - def forward(self, text, offsets): - embedded = self.embedding(text, offsets) - return self.fc(embedded) - - -###################################################################### -# Initiate an instance -# -------------------- -# -# The ``AG_NEWS`` dataset has four labels and therefore the number of classes is four. -# -# .. code-block:: sh -# -# 1 : World -# 2 : Sports -# 3 : Business -# 4 : Sci/Tec -# -# We build a model with the embedding dimension of 64. The vocab size is equal to the length of the vocabulary instance. The number of classes is equal to the number of labels, -# - -train_iter = AG_NEWS(split="train") -num_class = len(set([label for (label, text) in train_iter])) -vocab_size = len(vocab) -emsize = 64 -model = TextClassificationModel(vocab_size, emsize, num_class).to(device) - - -###################################################################### -# Define functions to train the model and evaluate results. 
-# --------------------------------------------------------- -# - - -import time - - -def train(dataloader): - model.train() - total_acc, total_count = 0, 0 - log_interval = 500 - start_time = time.time() - - for idx, (label, text, offsets) in enumerate(dataloader): - optimizer.zero_grad() - predicted_label = model(text, offsets) - loss = criterion(predicted_label, label) - loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1) - optimizer.step() - total_acc += (predicted_label.argmax(1) == label).sum().item() - total_count += label.size(0) - if idx % log_interval == 0 and idx > 0: - elapsed = time.time() - start_time - print( - "| epoch {:3d} | {:5d}/{:5d} batches " - "| accuracy {:8.3f}".format( - epoch, idx, len(dataloader), total_acc / total_count - ) - ) - total_acc, total_count = 0, 0 - start_time = time.time() - - -def evaluate(dataloader): - model.eval() - total_acc, total_count = 0, 0 - - with torch.no_grad(): - for idx, (label, text, offsets) in enumerate(dataloader): - predicted_label = model(text, offsets) - loss = criterion(predicted_label, label) - total_acc += (predicted_label.argmax(1) == label).sum().item() - total_count += label.size(0) - return total_acc / total_count - - -###################################################################### -# Split the dataset and run the model -# ----------------------------------- -# -# Since the original ``AG_NEWS`` has no valid dataset, we split the training -# dataset into train/valid sets with a split ratio of 0.95 (train) and -# 0.05 (valid). Here we use -# `torch.utils.data.dataset.random_split `__ -# function in PyTorch core library. -# -# `CrossEntropyLoss `__ -# criterion combines ``nn.LogSoftmax()`` and ``nn.NLLLoss()`` in a single class. -# It is useful when training a classification problem with C classes. -# `SGD `__ -# implements stochastic gradient descent method as the optimizer. The initial -# learning rate is set to 5.0. -# `StepLR `__ -# is used here to adjust the learning rate through epochs. 
-# - - -from torch.utils.data.dataset import random_split -from torchtext.data.functional import to_map_style_dataset - -# Hyperparameters -EPOCHS = 10 # epoch -LR = 5 # learning rate -BATCH_SIZE = 64 # batch size for training - -criterion = torch.nn.CrossEntropyLoss() -optimizer = torch.optim.SGD(model.parameters(), lr=LR) -scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1) -total_accu = None -train_iter, test_iter = AG_NEWS() -train_dataset = to_map_style_dataset(train_iter) -test_dataset = to_map_style_dataset(test_iter) -num_train = int(len(train_dataset) * 0.95) -split_train_, split_valid_ = random_split( - train_dataset, [num_train, len(train_dataset) - num_train] -) - -train_dataloader = DataLoader( - split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch -) -valid_dataloader = DataLoader( - split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch -) -test_dataloader = DataLoader( - test_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch -) - -for epoch in range(1, EPOCHS + 1): - epoch_start_time = time.time() - train(train_dataloader) - accu_val = evaluate(valid_dataloader) - if total_accu is not None and total_accu > accu_val: - scheduler.step() - else: - total_accu = accu_val - print("-" * 59) - print( - "| end of epoch {:3d} | time: {:5.2f}s | " - "valid accuracy {:8.3f} ".format( - epoch, time.time() - epoch_start_time, accu_val - ) - ) - print("-" * 59) - - -###################################################################### -# Evaluate the model with test dataset -# ------------------------------------ -# - - -###################################################################### -# Checking the results of the test dataset… - -print("Checking the results of test dataset.") -accu_test = evaluate(test_dataloader) -print("test accuracy {:8.3f}".format(accu_test)) - - -###################################################################### -# Test on a random news -# --------------------- -# -# Use the best model so far and test a golf news. -# - - -ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tec"} - - -def predict(text, text_pipeline): - with torch.no_grad(): - text = torch.tensor(text_pipeline(text)) - output = model(text, torch.tensor([0])) - return output.argmax(1).item() + 1 - - -ex_text_str = "MEMPHIS, Tenn. – Four days ago, Jon Rahm was \ - enduring the season’s worst weather conditions on Sunday at The \ - Open on his way to a closing 75 at Royal Portrush, which \ - considering the wind and the rain was a respectable showing. \ - Thursday’s first round at the WGC-FedEx St. Jude Invitational \ - was another story. With temperatures in the mid-80s and hardly any \ - wind, the Spaniard was 13 strokes better in a flawless round. \ - Thanks to his best putting performance on the PGA Tour, Rahm \ - finished with an 8-under 62 for a three-stroke lead, which \ - was even more impressive considering he’d never played the \ - front nine at TPC Southwind." - -model = model.to("cpu") - -print("This is a %s news" % ag_news_label[predict(ex_text_str, text_pipeline)]) diff --git a/beginner_source/text_sentiment_ngrams_tutorial.rst b/beginner_source/text_sentiment_ngrams_tutorial.rst new file mode 100644 index 00000000000..024d04056c5 --- /dev/null +++ b/beginner_source/text_sentiment_ngrams_tutorial.rst @@ -0,0 +1,12 @@ +:orphan: + +Text classification with the torchtext library +============================================== + +This tutorial has been deprecated. 
+ +Redirecting in 3 seconds... + +.. raw:: html + + diff --git a/beginner_source/torchtext_custom_dataset_tutorial.py b/beginner_source/torchtext_custom_dataset_tutorial.py deleted file mode 100644 index 9875d8aa43a..00000000000 --- a/beginner_source/torchtext_custom_dataset_tutorial.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Preprocess custom text dataset using Torchtext -=============================================== - -**Author**: `Anupam Sharma `_ - -This tutorial illustrates the usage of torchtext on a dataset that is not built-in. In the tutorial, -we will preprocess a dataset that can be further utilized to train a sequence-to-sequence -model for machine translation (something like, in this tutorial: `Sequence to Sequence Learning -with Neural Networks `_) but without using legacy version -of torchtext. - -In this tutorial, we will learn how to: - -* Read a dataset -* Tokenize sentence -* Apply transforms to sentence -* Perform bucket batching - -Let us assume that we need to prepare a dataset to train a model that can perform English to -German translation. We will use a tab-delimited German - English sentence pairs provided by -the `Tatoeba Project `_ which can be downloaded from -`this link `__. - -Sentence pairs for other languages can be found in `this link `\ -__. -""" - -# %% -# Setup -# ----- -# -# First, download the dataset, extract the zip, and note the path to the file `deu.txt`. -# -# Ensure that following packages are installed: -# -# * `Torchdata 0.6.0 `_ (`Installation instructions \ -# `__) -# * `Torchtext 0.15.0 `_ (`Installation instructions \ -# `__) -# * `Spacy `__ -# -# Here, we are using `Spacy` to tokenize text. In simple words tokenization means to -# convert a sentence to list of words. Spacy is a python package used for various Natural -# Language Processing (NLP) tasks. -# -# Download the English and German models from Spacy as shown below: -# -# .. code-block:: shell -# -# python -m spacy download en_core_web_sm -# python -m spacy download de_core_news_sm -# - - -# %% -# Let us start by importing required modules: - -import torchdata.datapipes as dp -import torchtext.transforms as T -import spacy -from torchtext.vocab import build_vocab_from_iterator -eng = spacy.load("en_core_web_sm") # Load the English model to tokenize English text -de = spacy.load("de_core_news_sm") # Load the German model to tokenize German text - -# %% -# Now we will load the dataset - -FILE_PATH = 'data/deu.txt' -data_pipe = dp.iter.IterableWrapper([FILE_PATH]) -data_pipe = dp.iter.FileOpener(data_pipe, mode='rb') -data_pipe = data_pipe.parse_csv(skip_lines=0, delimiter='\t', as_tuple=True) - -# %% -# In the above code block, we are doing following things: -# -# 1. At line 2, we are creating an iterable of filenames -# 2. At line 3, we pass the iterable to `FileOpener` which then -# opens the file in read mode -# 3. At line 4, we call a function to parse the file, which -# again returns an iterable of tuples representing each rows -# of the tab-delimited file -# -# DataPipes can be thought of something like a dataset object, on which -# we can perform various operations. -# Check `this tutorial `_ for more details on -# DataPipes. -# -# We can verify if the iterable has the pair of sentences as shown -# below: - -for sample in data_pipe: - print(sample) - break - -# %% -# Note that we also have attribution details along with pair of sentences. 
We will -# write a small function to remove the attribution details: - -def removeAttribution(row): - """ - Function to keep the first two elements in a tuple - """ - return row[:2] -data_pipe = data_pipe.map(removeAttribution) - -# %% -# The `map` function at line 6 in above code block can be used to apply some function -# on each elements of `data_pipe`. Now, we can verify that the `data_pipe` only contains -# pair of sentences. - - -for sample in data_pipe: - print(sample) - break - -# %% -# Now, let us define few functions to perform tokenization: - -def engTokenize(text): - """ - Tokenize an English text and return a list of tokens - """ - return [token.text for token in eng.tokenizer(text)] - -def deTokenize(text): - """ - Tokenize a German text and return a list of tokens - """ - return [token.text for token in de.tokenizer(text)] - -# %% -# Above function accepts a text and returns a list of words -# as shown below: - -print(engTokenize("Have a good day!!!")) -print(deTokenize("Haben Sie einen guten Tag!!!")) - -# %% -# Building the vocabulary -# ----------------------- -# Let us consider an English sentence as the source and a German sentence as the target. -# -# Vocabulary can be considered as the set of unique words we have in the dataset. -# We will build vocabulary for both our source and target now. -# -# Let us define a function to get tokens from elements of tuples in the iterator. - - -def getTokens(data_iter, place): - """ - Function to yield tokens from an iterator. Since, our iterator contains - tuple of sentences (source and target), `place` parameters defines for which - index to return the tokens for. `place=0` for source and `place=1` for target - """ - for english, german in data_iter: - if place == 0: - yield engTokenize(english) - else: - yield deTokenize(german) - -# %% -# Now, we will build vocabulary for source: - -source_vocab = build_vocab_from_iterator( - getTokens(data_pipe,0), - min_freq=2, - specials= ['', '', '', ''], - special_first=True -) -source_vocab.set_default_index(source_vocab['']) - -# %% -# The code above, builds the vocabulary from the iterator. In the above code block: -# -# * At line 2, we call the `getTokens()` function with `place=0` as we need vocabulary for -# source sentences. -# * At line 3, we set `min_freq=2`. This means, the function will skip those words that occurs -# less than 2 times. -# * At line 4, we specify some special tokens: -# -# * `` for start of sentence -# * `` for end of sentence -# * `` for unknown words. An example of unknown word is the one skipped because of -# `min_freq=2`. -# * `` is the padding token. While training, a model we mostly train in batches. In a -# batch, there can be sentences of different length. So, we pad the shorter sentences with -# `` token to make length of all sequences in the batch equal. -# -# * At line 5, we set `special_first=True`. Which means `` will get index 0, `` index 1, -# `` index 2, and will get index 3 in the vocabulary. -# * At line 7, we set default index as index of ``. That means if some word is not in -# vocabulary, we will use `` instead of that unknown word. -# -# Similarly, we will build vocabulary for target sentences: - -target_vocab = build_vocab_from_iterator( - getTokens(data_pipe,1), - min_freq=2, - specials= ['', '', '', ''], - special_first=True -) -target_vocab.set_default_index(target_vocab['']) - -# %% -# Note that the example above shows how can we add special tokens to our vocabulary. The -# special tokens may change based on the requirements. 
-# -# Now, we can verify that special tokens are placed at the beginning and then other words. -# In the below code, `source_vocab.get_itos()` returns a list with tokens at index based on -# vocabulary. - -print(source_vocab.get_itos()[:9]) - -# %% -# Numericalize sentences using vocabulary -# --------------------------------------- -# After building the vocabulary, we need to convert our sentences to corresponding indices. -# Let us define some functions for this: - -def getTransform(vocab): - """ - Create transforms based on given vocabulary. The returned transform is applied to sequence - of tokens. - """ - text_tranform = T.Sequential( - ## converts the sentences to indices based on given vocabulary - T.VocabTransform(vocab=vocab), - ## Add at beginning of each sentence. 1 because the index for in vocabulary is - # 1 as seen in previous section - T.AddToken(1, begin=True), - ## Add at beginning of each sentence. 2 because the index for in vocabulary is - # 2 as seen in previous section - T.AddToken(2, begin=False) - ) - return text_tranform - -# %% -# Now, let us see how to use the above function. The function returns an object of `Transforms` -# which we will use on our sentence. Let us take a random sentence and check how the transform -# works. - -temp_list = list(data_pipe) -some_sentence = temp_list[798][0] -print("Some sentence=", end="") -print(some_sentence) -transformed_sentence = getTransform(source_vocab)(engTokenize(some_sentence)) -print("Transformed sentence=", end="") -print(transformed_sentence) -index_to_string = source_vocab.get_itos() -for index in transformed_sentence: - print(index_to_string[index], end=" ") - -# %% -# In the above code,: -# -# * At line 2, we take a source sentence from list that we created from `data_pipe` at line 1 -# * At line 5, we get a transform based on a source vocabulary and apply it to a tokenized -# sentence. Note that transforms take list of words and not a sentence. -# * At line 8, we get the mapping of index to string and then use it get the transformed -# sentence -# -# Now we will use DataPipe functions to apply transform to all our sentences. -# Let us define some more functions for this. - -def applyTransform(sequence_pair): - """ - Apply transforms to sequence of tokens in a sequence pair - """ - - return ( - getTransform(source_vocab)(engTokenize(sequence_pair[0])), - getTransform(target_vocab)(deTokenize(sequence_pair[1])) - ) -data_pipe = data_pipe.map(applyTransform) ## Apply the function to each element in the iterator -temp_list = list(data_pipe) -print(temp_list[0]) - -# %% -# Make batches (with bucket batch) -# -------------------------------- -# Generally, we train models in batches. While working for sequence to sequence models, it is -# recommended to keep the length of sequences in a batch similar. For that we will use -# `bucketbatch` function of `data_pipe`. -# -# Let us define some functions that will be used by the `bucketbatch` function. - -def sortBucket(bucket): - """ - Function to sort a given bucket. Here, we want to sort based on the length of - source and target sequence. - """ - return sorted(bucket, key=lambda x: (len(x[0]), len(x[1]))) - -# %% -# Now, we will apply the `bucketbatch` function: - -data_pipe = data_pipe.bucketbatch( - batch_size = 4, batch_num=5, bucket_num=1, - use_in_batch_shuffle=False, sort_key=sortBucket -) - -# %% -# In the above code block: -# -# * We keep batch size = 4. 
-# * `batch_num` is the number of batches to keep in a bucket -# * `bucket_num` is the number of buckets to keep in a pool for shuffling -# * `sort_key` specifies the function that takes a bucket and sorts it -# -# Now, let us consider a batch of source sentences as `X` and a batch of target sentences as `y`. -# Generally, while training a model, we predict on a batch of `X` and compare the result with `y`. -# But, a batch in our `data_pipe` is of the form `[(X_1,y_1), (X_2,y_2), (X_3,y_3), (X_4,y_4)]`: - -print(list(data_pipe)[0]) -# %% -# So, we will now convert them into the form: `((X_1,X_2,X_3,X_4), (y_1,y_2,y_3,y_4))`. -# For this we will write a small function: - -def separateSourceTarget(sequence_pairs): - """ - input of form: `[(X_1,y_1), (X_2,y_2), (X_3,y_3), (X_4,y_4)]` - output of form: `((X_1,X_2,X_3,X_4), (y_1,y_2,y_3,y_4))` - """ - sources,targets = zip(*sequence_pairs) - return sources,targets - -## Apply the function to each element in the iterator -data_pipe = data_pipe.map(separateSourceTarget) -print(list(data_pipe)[0]) - -# %% -# Now, we have the data as desired. -# -# Padding -# ------- -# As discussed earlier while building vocabulary, we need to pad shorter sentences in a batch to -# make all the sequences in a batch of equal length. We can perform padding as follows: - -def applyPadding(pair_of_sequences): - """ - Convert sequences to tensors and apply padding - """ - return (T.ToTensor(0)(list(pair_of_sequences[0])), T.ToTensor(0)(list(pair_of_sequences[1]))) -## `T.ToTensor(0)` returns a transform that converts the sequence to `torch.tensor` and also applies -# padding. Here, `0` is passed to the constructor to specify the index of the `` token in the -# vocabulary. -data_pipe = data_pipe.map(applyPadding) - -# %% -# Now, we can use the index to string mapping to see how the sequence would look with tokens -# instead of indices: - -source_index_to_string = source_vocab.get_itos() -target_index_to_string = target_vocab.get_itos() - -def showSomeTransformedSentences(data_pipe): - """ - Function to show how the sentences look like after applying all transforms. - Here we try to print actual words instead of corresponding index - """ - for sources,targets in data_pipe: - if sources[0][-1] != 0: - continue # Just to visualize padding of shorter sentences - for i in range(4): - source = "" - for token in sources[i]: - source += " " + source_index_to_string[token] - target = "" - for token in targets[i]: - target += " " + target_index_to_string[token] - print(f"Source: {source}") - print(f"Traget: {target}") - break - -showSomeTransformedSentences(data_pipe) -# %% -# In the above output we can observe that the shorter sentences are padded with ``. Now, we -# can use `data_pipe` while writing our training function. -# -# Some parts of this tutorial was inspired from `this article -# `__. diff --git a/beginner_source/torchtext_custom_dataset_tutorial.rst b/beginner_source/torchtext_custom_dataset_tutorial.rst new file mode 100644 index 00000000000..9f014f3ff9a --- /dev/null +++ b/beginner_source/torchtext_custom_dataset_tutorial.rst @@ -0,0 +1,12 @@ +:orphan: + +Preprocess custom text dataset using torchtext +============================================== + +This tutorial has been deprecated. + +Redirecting in 3 seconds... + +.. raw:: html + + diff --git a/index.rst b/index.rst index 91517834fd8..ad8f5cf09f0 100644 --- a/index.rst +++ b/index.rst @@ -261,13 +261,6 @@ Welcome to PyTorch Tutorials :link: intermediate/seq2seq_translation_tutorial.html :tags: Text -.. 
customcarditem:: - :header: Text Classification with Torchtext - :card_description: Learn how to build the dataset and classify text using torchtext library. - :image: _static/img/thumbnails/cropped/Text-Classification-with-TorchText.png - :link: beginner/text_sentiment_ngrams_tutorial.html - :tags: Text - .. customcarditem:: :header: Language Translation with Transformer :card_description: Train a language translation model from scratch using Transformer. @@ -275,13 +268,6 @@ Welcome to PyTorch Tutorials :link: beginner/translation_transformer.html :tags: Text -.. customcarditem:: - :header: Pre-process custom text dataset using Torchtext - :card_description: Learn how to use torchtext to prepare a custom dataset - :image: _static/img/thumbnails/cropped/torch_text_logo.png - :link: beginner/torchtext_custom_dataset_tutorial.html - :tags: Text - .. ONNX @@ -1020,9 +1006,7 @@ Additional Resources intermediate/char_rnn_classification_tutorial intermediate/char_rnn_generation_tutorial intermediate/seq2seq_translation_tutorial - beginner/text_sentiment_ngrams_tutorial beginner/translation_transformer - beginner/torchtext_custom_dataset_tutorial .. toctree::