From 510f82ea036b8b2b7d7f57baa53bcc719fa124e3 Mon Sep 17 00:00:00 2001 From: frasertajima <69366820+frasertajima@users.noreply.github.com> Date: Wed, 31 May 2023 13:17:24 -0700 Subject: [PATCH 01/67] Update transformer_tutorial.py (#2363) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix to "perhaps there is a misprint at line 40 #2111"; review of referenced paper https://arxiv.org/pdf/1706.03762.pdf section 3.2.3 suggests: "Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to −∞) all values in the input of the softmax which correspond to illegal connections. See Figure 2." Thus the suggested change in reference from nn.Transform.Encoder to nn.Transform.Decoder seems reasonable. --- beginner_source/transformer_tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index d93b3d55fe7..57d1f8d8591 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -37,7 +37,7 @@ # ``nn.TransformerEncoder`` consists of multiple layers of # `nn.TransformerEncoderLayer `__. # Along with the input sequence, a square attention mask is required because the -# self-attention layers in ``nn.TransformerEncoder`` are only allowed to attend +# self-attention layers in ``nn.TransformerDecoder`` are only allowed to attend # the earlier positions in the sequence. For the language modeling task, any # tokens on the future positions should be masked. To produce a probability # distribution over output words, the output of the ``nn.TransformerEncoder`` From dfc6aa22c9905778d3687360ed83fcc28cac1b7d Mon Sep 17 00:00:00 2001 From: TheMemoryDealer <32904619+TheMemoryDealer@users.noreply.github.com> Date: Wed, 31 May 2023 21:59:13 +0100 Subject: [PATCH 02/67] Update dynamic_quantization_bert_tutorial.rst (#2369) As per suggestion in #1114 --- intermediate_source/dynamic_quantization_bert_tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intermediate_source/dynamic_quantization_bert_tutorial.rst b/intermediate_source/dynamic_quantization_bert_tutorial.rst index d618df87d58..53ac2cd0afb 100644 --- a/intermediate_source/dynamic_quantization_bert_tutorial.rst +++ b/intermediate_source/dynamic_quantization_bert_tutorial.rst @@ -68,7 +68,7 @@ built-in F1 score calculation helper function. .. code:: shell pip install sklearn - pip install transformers + pip install transformers==4.29.2 Because we will be using the beta parts of the PyTorch, it is From d55a262b2279d35672eae674232ae135b45db368 Mon Sep 17 00:00:00 2001 From: frasertajima <69366820+frasertajima@users.noreply.github.com> Date: Wed, 31 May 2023 15:28:33 -0700 Subject: [PATCH 03/67] Update super_resolution_with_onnxruntime.py (#2370) Fix for #1781 Rather than manually update the version number with the current stable version (e.g., 2.0.1), as long as ONNX maintains compatibility with the lastest stable version that reference should be sufficient and constantly up to date. 
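For readers following the tutorial itself, the export-and-verify flow that this compatibility note refers to is roughly the sketch below (a minimal, standalone illustration using a toy one-layer model rather than the tutorial's SuperResolutionNet; the file name and tolerances are arbitrary):

    import numpy as np
    import torch
    import onnx
    import onnxruntime

    # Toy stand-in for the tutorial's model, just to show the flow.
    model = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 3, padding=1)).eval()
    dummy_input = torch.randn(1, 1, 224, 224)

    # Export to ONNX; ``pip install onnx onnxruntime`` pulls the latest stable builds.
    torch.onnx.export(model, dummy_input, "model.onnx",
                      input_names=["input"], output_names=["output"])

    # Check the exported graph, then run it with ONNX Runtime and compare outputs.
    onnx.checker.check_model(onnx.load("model.onnx"))
    session = onnxruntime.InferenceSession("model.onnx",
                                           providers=["CPUExecutionProvider"])
    ort_out = session.run(None, {"input": dummy_input.numpy()})[0]
    np.testing.assert_allclose(model(dummy_input).detach().numpy(), ort_out,
                               rtol=1e-3, atol=1e-5)

If the assertion passes with whichever stable PyTorch/ONNX Runtime pair is installed, that pair is compatible for this workflow.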
--- advanced_source/super_resolution_with_onnxruntime.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/advanced_source/super_resolution_with_onnxruntime.py b/advanced_source/super_resolution_with_onnxruntime.py index eb184e85109..835a79bd3a0 100644 --- a/advanced_source/super_resolution_with_onnxruntime.py +++ b/advanced_source/super_resolution_with_onnxruntime.py @@ -16,10 +16,7 @@ and `ONNX Runtime `__. You can get binary builds of ONNX and ONNX Runtime with ``pip install onnx onnxruntime``. -Note that ONNX Runtime is compatible with Python versions 3.5 to 3.7. - -``NOTE``: This tutorial needs PyTorch master branch which can be installed by following -the instructions `here `__ +ONNX Runtime recommends using the latest stable runtime for PyTorch. """ From 7aff96cb8e2ccdcf5fe9f6eeb08f28b374eeb9f9 Mon Sep 17 00:00:00 2001 From: frasertajima <69366820+frasertajima@users.noreply.github.com> Date: Wed, 31 May 2023 16:16:51 -0700 Subject: [PATCH 04/67] Update nn_tutorial.py (#2368) * Update nn_tutorial.py Fix to #1303 "add pyplot.show() in beginner tutorial." Comments to issue suggested manually commenting out pyplot.show for users not using colab. --------- Co-authored-by: Svetlana Karslioglu --- beginner_source/nn_tutorial.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/beginner_source/nn_tutorial.py b/beginner_source/nn_tutorial.py index bc32131b93a..7ee7df3b435 100644 --- a/beginner_source/nn_tutorial.py +++ b/beginner_source/nn_tutorial.py @@ -75,6 +75,11 @@ import numpy as np pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray") +# ``pyplot.show()`` only if not on Colab +try: + import google.colab +except ImportError: + pyplot.show() print(x_train.shape) ############################################################################### From 4673b1434bb03eaaa4a4519d9fe9da545b02748e Mon Sep 17 00:00:00 2001 From: Suhas G Date: Thu, 1 Jun 2023 01:17:56 +0200 Subject: [PATCH 05/67] Add model.eval() in neural_style_tutorial.py (#2371) --- advanced_source/neural_style_tutorial.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/advanced_source/neural_style_tutorial.py b/advanced_source/neural_style_tutorial.py index 3d84fc508bc..54085fb1e98 100644 --- a/advanced_source/neural_style_tutorial.py +++ b/advanced_source/neural_style_tutorial.py @@ -423,6 +423,9 @@ def run_style_transfer(cnn, normalization_mean, normalization_std, # We want to optimize the input and not the model parameters so we # update all the requires_grad fields accordingly input_img.requires_grad_(True) + # We also put the model in evaluation mode, so that specific layers + # such as dropout or batch normalization layers behave correctly. 
+ model.eval() model.requires_grad_(False) optimizer = get_input_optimizer(input_img) From d686b662932a380a58b7683425faa00c06bcf502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Luis=20Castro=20Garc=C3=ADa?= <81191337+JoseLuisC99@users.noreply.github.com> Date: Wed, 31 May 2023 19:19:00 -0600 Subject: [PATCH 06/67] Fix train loop in trainingyt.py (#2372) * refactored train loop in trainingyt.py, resolves issue #2230 * Simplified numpy function call, resolves issue #1038 --- beginner_source/introyt/trainingyt.py | 20 ++++++++++++-------- intermediate_source/torchvision_tutorial.rst | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/beginner_source/introyt/trainingyt.py b/beginner_source/introyt/trainingyt.py index 929e06c1b57..d9f585411e8 100644 --- a/beginner_source/introyt/trainingyt.py +++ b/beginner_source/introyt/trainingyt.py @@ -290,15 +290,19 @@ def train_one_epoch(epoch_index, tb_writer): model.train(True) avg_loss = train_one_epoch(epoch_number, writer) - # We don't need gradients on to do reporting - model.train(False) - + running_vloss = 0.0 - for i, vdata in enumerate(validation_loader): - vinputs, vlabels = vdata - voutputs = model(vinputs) - vloss = loss_fn(voutputs, vlabels) - running_vloss += vloss + # Set the model to evaluation mode, disabling dropout and using population + # statistics for batch normalization. + model.eval() + + # Disable gradient computation and reduce memory consumption. + with torch.no_grad(): + for i, vdata in enumerate(validation_loader): + vinputs, vlabels = vdata + voutputs = model(vinputs) + vloss = loss_fn(voutputs, vlabels) + running_vloss += vloss avg_vloss = running_vloss / (i + 1) print('LOSS train {} valid {}'.format(avg_loss, avg_vloss)) diff --git a/intermediate_source/torchvision_tutorial.rst b/intermediate_source/torchvision_tutorial.rst index 9e3d1b9655c..21d47e258f7 100644 --- a/intermediate_source/torchvision_tutorial.rst +++ b/intermediate_source/torchvision_tutorial.rst @@ -145,7 +145,7 @@ Let’s write a ``torch.utils.data.Dataset`` class for this dataset. num_objs = len(obj_ids) boxes = [] for i in range(num_objs): - pos = np.where(masks[i]) + pos = np.nonzero(masks[i]) xmin = np.min(pos[1]) xmax = np.max(pos[1]) ymin = np.min(pos[0]) From 0bee138587b12312df68237d4e57886896898c6e Mon Sep 17 00:00:00 2001 From: Beniamin Condrea <30630733+BeniaminC@users.noreply.github.com> Date: Wed, 31 May 2023 20:09:35 -0700 Subject: [PATCH 07/67] Added matplotlib dependency to blitz tutorial (#2366) * Added matplotlib dependency to blitz tutorial. * Removed a modified file from pull request --------- Co-authored-by: Carl Parker --- beginner_source/deep_learning_60min_blitz.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/beginner_source/deep_learning_60min_blitz.rst b/beginner_source/deep_learning_60min_blitz.rst index 09ac232cc49..6c96c403455 100644 --- a/beginner_source/deep_learning_60min_blitz.rst +++ b/beginner_source/deep_learning_60min_blitz.rst @@ -20,11 +20,12 @@ Goal of this tutorial: - Understand PyTorch’s Tensor library and neural networks at a high level. - Train a small neural network to classify images -To run the tutorials below, make sure you have the `torch`_ and `torchvision`_ -packages installed. +To run the tutorials below, make sure you have the `torch`_, `torchvision`_, +and `matplotlib`_ packages installed. .. _torch: https://github.com/pytorch/pytorch .. _torchvision: https://github.com/pytorch/vision +.. _matplotlib: https://github.com/matplotlib/matplotlib .. 
toctree:: :hidden: From d3686263fea8c2c625f0c4a3d46a2e28485701e1 Mon Sep 17 00:00:00 2001 From: Fabio Gomez Date: Thu, 1 Jun 2023 10:44:16 -0500 Subject: [PATCH 08/67] Fix formatting in the FX Graph Mode Quantization guide (#2362) * removed ### lines and numbered in headlines * removed numbered from titles * added blank lines to show code * Remove the empty TODO placeholder --------- Co-authored-by: Svetlana Karslioglu --- .../fx_graph_mode_quant_guide.rst | 27 +++++++++---------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/prototype_source/fx_graph_mode_quant_guide.rst b/prototype_source/fx_graph_mode_quant_guide.rst index bb360861b9f..9072e488a4b 100644 --- a/prototype_source/fx_graph_mode_quant_guide.rst +++ b/prototype_source/fx_graph_mode_quant_guide.rst @@ -4,7 +4,7 @@ **Author**: `Jerry Zhang `_ FX Graph Mode Quantization requires a symbolically traceable model. -We use the FX framework (TODO: link) to convert a symbolically traceable nn.Module instance to IR, +We use the FX framework to convert a symbolically traceable nn.Module instance to IR, and we operate on the IR to execute the quantization passes. Please post your question about symbolically tracing your model in `PyTorch Discussion Forum `_ @@ -22,16 +22,19 @@ You can use any combination of these options: b. Write your own observed and quantized submodule -#################################################################### If the code that is not symbolically traceable does not need to be quantized, we have the following two options to run FX Graph Mode Quantization: -1.a. Symbolically trace only the code that needs to be quantized + + +Symbolically trace only the code that needs to be quantized ----------------------------------------------------------------- When the whole model is not symbolically traceable but the submodule we want to quantize is symbolically traceable, we can run quantization only on that submodule. + before: .. code:: python + class M(nn.Module): def forward(self, x): x = non_traceable_code_1(x) @@ -42,6 +45,7 @@ before: after: .. code:: python + class FP32Traceable(nn.Module): def forward(self, x): x = traceable_code(x) @@ -69,8 +73,7 @@ Note if original model needs to be preserved, you will have to copy it yourself before calling the quantization APIs. -##################################################### -1.b. Skip symbolically trace the non-traceable code +Skip symbolically trace the non-traceable code --------------------------------------------------- When we have some non-traceable code in the module, and this part of code doesn’t need to be quantized, we can factor out this part of the code into a submodule and skip symbolically trace that submodule. @@ -134,8 +137,7 @@ quantization code: If the code that is not symbolically traceable needs to be quantized, we have the following two options: -########################################################## -2.a Refactor your code to make it symbolically traceable +Refactor your code to make it symbolically traceable -------------------------------------------------------- If it is easy to refactor the code and make the code symbolically traceable, we can refactor the code and remove the use of non-traceable constructs in python. @@ -167,15 +169,10 @@ after: return x.permute(0, 2, 1, 3) -quantization code: - This can be combined with other approaches and the quantization code depends on the model. - - -####################################################### -2.b. 
Write your own observed and quantized submodule +Write your own observed and quantized submodule ----------------------------------------------------- If the non-traceable code can’t be refactored to be symbolically traceable, @@ -207,8 +204,8 @@ non-traceable logic, wrapped in a module class FP32NonTraceable: ... - -2. Define observed version of FP32NonTraceable +2. Define observed version of +FP32NonTraceable .. code:: python From c5501e78a19f7cae71cc91fb5a9ead1c283e9ee3 Mon Sep 17 00:00:00 2001 From: Mariia Mykhailova Date: Thu, 1 Jun 2023 08:49:51 -0700 Subject: [PATCH 09/67] Redirect "Finetuning Torchvision Models" to "TorchVision Object Detection Finetuning Tutorial" (#2378) --- .../finetuning_torchvision_models_tutorial.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 beginner_source/finetuning_torchvision_models_tutorial.rst diff --git a/beginner_source/finetuning_torchvision_models_tutorial.rst b/beginner_source/finetuning_torchvision_models_tutorial.rst new file mode 100644 index 00000000000..711f4b0f99b --- /dev/null +++ b/beginner_source/finetuning_torchvision_models_tutorial.rst @@ -0,0 +1,10 @@ +Finetuning Torchvision Models +============================= + +This tutorial has been moved to https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html + +It will redirect in 3 seconds. + +.. raw:: html + + From 9633e5f141eefbe62e5dcb8168b9e34d505058d0 Mon Sep 17 00:00:00 2001 From: Sergii Dymchenko Date: Thu, 1 Jun 2023 09:31:10 -0700 Subject: [PATCH 10/67] Fix docathon-label-sync.py to not fail on PRs without description (#2379) See https://github.com/pytorch/tutorials/actions/runs/5140794478/jobs/9252588225?pr=2377 as an example --- .github/scripts/docathon-label-sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/scripts/docathon-label-sync.py b/.github/scripts/docathon-label-sync.py index 597f4b5e034..5da80f24f5b 100644 --- a/.github/scripts/docathon-label-sync.py +++ b/.github/scripts/docathon-label-sync.py @@ -14,6 +14,9 @@ def main(): repo = g.get_repo(f'{repo_owner}/{repo_name}') pull_request = repo.get_pull(pull_request_number) pull_request_body = pull_request.body + # PR without description + if pull_request_body is None: + return # get issue number from the PR body if not re.search(r'#\d{1,5}', pull_request_body): From d9fd5bae719632632f96865bc198dd266905bacc Mon Sep 17 00:00:00 2001 From: Qasim Khan Date: Thu, 1 Jun 2023 21:39:27 +0500 Subject: [PATCH 11/67] Change batchify desc to remove ambiguity (#2383) Co-authored-by: Carl Parker --- beginner_source/transformer_tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index 57d1f8d8591..cce52eefdb3 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -149,7 +149,7 @@ def forward(self, x: Tensor) -> Tensor: # into ``batch_size`` columns. If the data does not divide evenly into # ``batch_size`` columns, then the data is trimmed to fit. For instance, with # the alphabet as the data (total length of 26) and ``batch_size=4``, we would -# divide the alphabet into 4 sequences of length 6: +# divide the alphabet into sequences of length 6, resulting in 4 of such sequences. # # .. 
math:: # \begin{bmatrix} From 4cd44ae2dd4cfdd5f923302d4e6af234b5af0ece Mon Sep 17 00:00:00 2001 From: Kiersten Stokes Date: Thu, 1 Jun 2023 12:19:25 -0500 Subject: [PATCH 12/67] Change formatting of code blocks for correct rendering in Colab (#2398) --- .../tensorboard_profiler_tutorial.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/intermediate_source/tensorboard_profiler_tutorial.py b/intermediate_source/tensorboard_profiler_tutorial.py index 440f2257e1a..2b241071b7f 100644 --- a/intermediate_source/tensorboard_profiler_tutorial.py +++ b/intermediate_source/tensorboard_profiler_tutorial.py @@ -18,7 +18,7 @@ ----- To install ``torch`` and ``torchvision`` use the following command: -:: +.. code-block:: pip install torch torchvision @@ -160,7 +160,7 @@ def train(data): # # Install PyTorch Profiler TensorBoard Plugin. # -# :: +# .. code-block:: # # pip install torch_tb_profiler # @@ -168,7 +168,7 @@ def train(data): ###################################################################### # Launch the TensorBoard. # -# :: +# .. code-block:: # # tensorboard --logdir=./log # @@ -176,7 +176,7 @@ def train(data): ###################################################################### # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser. # -# :: +# .. code-block:: # # http://localhost:6006/#pytorch_profiler # @@ -287,7 +287,7 @@ def train(data): # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below, # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again. # -# :: +# .. code-block:: # # train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4) # @@ -316,7 +316,7 @@ def train(data): # # You can try it by using existing example on Azure # -# :: +# .. code-block:: # # pip install azure-storage-blob # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo_1_10 @@ -366,7 +366,7 @@ def train(data): # # You can try it by using existing example on Azure: # -# :: +# .. code-block:: # # pip install azure-storage-blob # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert From 7e72b705cb7d6057cae513c1259e144b2a99e887 Mon Sep 17 00:00:00 2001 From: Mateusz Nowak <37732935+noqqaqq@users.noreply.github.com> Date: Thu, 1 Jun 2023 19:34:47 +0200 Subject: [PATCH 13/67] README.txt - fix unreachable link (#2386) Co-authored-by: sekyondaMeta <127536312+sekyondaMeta@users.noreply.github.com> --- prototype_source/README.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/prototype_source/README.txt b/prototype_source/README.txt index 94c182dcca0..4ab9ce8f6a9 100644 --- a/prototype_source/README.txt +++ b/prototype_source/README.txt @@ -1,8 +1,8 @@ Prototype Tutorials ------------------ 1. distributed_rpc_profiling.rst - Profiling PyTorch RPC-Based Workloads - https://github.com/pytorch/tutorials/blob/release/1.6/prototype_source/distributed_rpc_profiling.rst + Profiling PyTorch RPC-Based Workloads + https://github.com/pytorch/tutorials/blob/main/prototype_source/distributed_rpc_profiling.rst 2. graph_mode_static_quantization_tutorial.py Graph Mode Post Training Static Quantization in PyTorch @@ -21,8 +21,8 @@ Prototype Tutorials https://github.com/pytorch/tutorials/blob/main/prototype_source/torchscript_freezing.py 6. 
vulkan_workflow.rst - Vulkan Backend User Workflow - https://pytorch.org/tutorials/intermediate/vulkan_workflow.html + Vulkan Backend User Workflow + https://pytorch.org/tutorials/intermediate/vulkan_workflow.html 7. fx_graph_mode_ptq_static.rst FX Graph Mode Post Training Static Quantization From 0be50f4e48ea4af0bc27dee5af936a41700e61fc Mon Sep 17 00:00:00 2001 From: Sergii Dymchenko Date: Thu, 1 Jun 2023 10:46:55 -0700 Subject: [PATCH 14/67] Fix typo in a PR template (#2377) --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 0392eb3a00d..8c3604b99fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,4 +8,4 @@ Fixes #ISSUE_NUMBER - [ ] The issue that is being fixed is referred in the description (see above "Fixes #ISSUE_NUMBER") - [ ] Only one issue is addressed in this pull request - [ ] Labels from the issue that this PR is fixing are added to this pull request -- [ ] No unnessessary issues are included into this pull request. +- [ ] No unnecessary issues are included into this pull request. From aa400c32d7c602d895116fbf92e5600264bd5616 Mon Sep 17 00:00:00 2001 From: Qasim Khan Date: Thu, 1 Jun 2023 23:41:27 +0500 Subject: [PATCH 15/67] Fixes module 'get_filesystem' error (#2397) * Add temporary fix for embeddings bug Co-authored-by: Svetlana Karslioglu --- beginner_source/introyt/tensorboardyt_tutorial.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beginner_source/introyt/tensorboardyt_tutorial.py b/beginner_source/introyt/tensorboardyt_tutorial.py index 4c7c356fd0c..29e83066726 100644 --- a/beginner_source/introyt/tensorboardyt_tutorial.py +++ b/beginner_source/introyt/tensorboardyt_tutorial.py @@ -64,6 +64,13 @@ # PyTorch TensorBoard support from torch.utils.tensorboard import SummaryWriter +# In case you are using an environment that has TensorFlow installed, +# such as Google Colab, uncomment the following code to avoid +# a bug with saving embeddings to your TensorBoard directory + +# import tensorflow as tf +# import tensorboard as tb +# tf.io.gfile = tb.compat.tensorflow_stub.io.gfile ###################################################################### # Showing Images in TensorBoard From e2a7ab0f009cce4555f28c70711ee67ae85ad08a Mon Sep 17 00:00:00 2001 From: Kiersten Stokes Date: Thu, 1 Jun 2023 15:31:52 -0500 Subject: [PATCH 16/67] Clear plot at beginning of loop so that non-empty image renders (#2401) Co-authored-by: Svetlana Karslioglu --- intermediate_source/mario_rl_tutorial.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/intermediate_source/mario_rl_tutorial.py b/intermediate_source/mario_rl_tutorial.py index ff653d54c11..8d02f3daf34 100755 --- a/intermediate_source/mario_rl_tutorial.py +++ b/intermediate_source/mario_rl_tutorial.py @@ -711,17 +711,18 @@ def record(self, episode, epsilon, step): f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n" ) - for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]: - plt.plot(getattr(self, f"moving_avg_{metric}")) - plt.savefig(getattr(self, f"{metric}_plot")) + for metric in ["ep_lengths", "ep_avg_losses", "ep_avg_qs", "ep_rewards"]: plt.clf() + plt.plot(getattr(self, f"moving_avg_{metric}"), label=f"moving_avg_{metric}") + plt.legend() + plt.savefig(getattr(self, f"{metric}_plot")) ###################################################################### # Let’s play! 
# """"""""""""""" # -# In this example we run the training loop for 10 episodes, but for Mario to truly learn the ways of +# In this example we run the training loop for 40 episodes, but for Mario to truly learn the ways of # his world, we suggest running the loop for at least 40,000 episodes! # use_cuda = torch.cuda.is_available() @@ -735,7 +736,7 @@ def record(self, episode, epsilon, step): logger = MetricLogger(save_dir) -episodes = 10 +episodes = 40 for e in range(episodes): state = env.reset() From e1ec4bdaa7bbf167a354ba4c7aab8f17c1831bf7 Mon Sep 17 00:00:00 2001 From: Hemanth Sai <73033596+HemanthSai7@users.noreply.github.com> Date: Fri, 2 Jun 2023 02:31:26 +0530 Subject: [PATCH 17/67] Optimize DataLoader iteration in WrappedDataLoader (#2375) --- beginner_source/nn_tutorial.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beginner_source/nn_tutorial.py b/beginner_source/nn_tutorial.py index 7ee7df3b435..183aca1748b 100644 --- a/beginner_source/nn_tutorial.py +++ b/beginner_source/nn_tutorial.py @@ -795,8 +795,7 @@ def __len__(self): return len(self.dl) def __iter__(self): - batches = iter(self.dl) - for b in batches: + for b in self.dl: yield (self.func(*b)) train_dl, valid_dl = get_data(train_ds, valid_ds, bs) From d07875659aa9ca8b092344a2ecc487d4b3309ff3 Mon Sep 17 00:00:00 2001 From: TheMemoryDealer <32904619+TheMemoryDealer@users.noreply.github.com> Date: Thu, 1 Jun 2023 22:47:44 +0100 Subject: [PATCH 18/67] Patch 3 (#2389) * Updates #836 as suggested in https://github.com/pytorch/pytorch/issues/16885#issuecomment-551779897 --- beginner_source/former_torchies/parallelism_tutorial.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/beginner_source/former_torchies/parallelism_tutorial.py b/beginner_source/former_torchies/parallelism_tutorial.py index 18c14c43167..a11d844e1bd 100644 --- a/beginner_source/former_torchies/parallelism_tutorial.py +++ b/beginner_source/former_torchies/parallelism_tutorial.py @@ -53,7 +53,10 @@ def forward(self, x): class MyDataParallel(nn.DataParallel): def __getattr__(self, name): - return getattr(self.module, name) + try: + return super().__getattr__(name) + except AttributeError: + return getattr(self.module, name) ######################################################################## # **Primitives on which DataParallel is implemented upon:** From 56a2faf3a561cff3a7e98675a7d2080d84e30f96 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 1 Jun 2023 17:30:45 -0500 Subject: [PATCH 19/67] Address Err in char_rnn tutorial issue (#2374) * address bug; do a little editing Signed-off-by: Mike Brown * Update intermediate_source/char_rnn_classification_tutorial.py Signed-off-by: Mike Brown Co-authored-by: Svetlana Karslioglu --- .../char_rnn_classification_tutorial.py | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/intermediate_source/char_rnn_classification_tutorial.py b/intermediate_source/char_rnn_classification_tutorial.py index 9b1f255a51b..0c0aa3e988b 100644 --- a/intermediate_source/char_rnn_classification_tutorial.py +++ b/intermediate_source/char_rnn_classification_tutorial.py @@ -4,11 +4,14 @@ ************************************************************** **Author**: `Sean Robertson `_ -We will be building and training a basic character-level RNN to classify -words. 
This tutorial, along with the following two, show how to do -preprocess data for NLP modeling "from scratch", in particular not using -many of the convenience functions of `torchtext`, so you can see how -preprocessing for NLP modeling works at a low level. +We will be building and training a basic character-level Recurrent Neural +Network (RNN) to classify words. This tutorial, along with two other +Natural Language Processing (NLP) "from scratch" tutorials +:doc:`/intermediate/char_rnn_generation_tutorial` and +:doc:`/intermediate/seq2seq_translation_tutorial`, show how to +preprocess data to model NLP. In particular these tutorials do not +use many of the convenience functions of `torchtext`, so you can see how +preprocessing to model NLP works at a low level. A character-level RNN reads words as a series of characters - outputting a prediction and "hidden state" at each step, feeding its @@ -32,13 +35,15 @@ (-2.68) Dutch -**Recommended Reading:** +Recommended Preparation +======================= -I assume you have at least installed PyTorch, know Python, and -understand Tensors: +Before starting this tutorial it is recommended that you have installed PyTorch, +and have a basic understanding of Python programming language and Tensors: - https://pytorch.org/ For installation instructions - :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general + and learn the basics of Tensors - :doc:`/beginner/pytorch_with_examples` for a wide and deep overview - :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user @@ -181,10 +186,6 @@ def lineToTensor(line): # is just 2 linear layers which operate on an input and hidden state, with # a ``LogSoftmax`` layer after the output. # -# .. figure:: https://i.imgur.com/Z2xbySO.png -# :alt: -# -# import torch.nn as nn @@ -195,13 +196,13 @@ def __init__(self, input_size, hidden_size, output_size): self.hidden_size = hidden_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) - self.i2o = nn.Linear(input_size + hidden_size, output_size) + self.h2o = nn.Linear(hidden_size, output_size) self.softmax = nn.LogSoftmax(dim=1) def forward(self, input, hidden): combined = torch.cat((input, hidden), 1) hidden = self.i2h(combined) - output = self.i2o(combined) + output = self.h2o(hidden) output = self.softmax(output) return output, hidden From 9b5405667b99c37c990981c9646383e46ad39a79 Mon Sep 17 00:00:00 2001 From: zabboud <91271094+zabboud@users.noreply.github.com> Date: Thu, 1 Jun 2023 18:44:00 -0400 Subject: [PATCH 20/67] Fixes #2083 - explain model.eval, torch.no_grad (#2400) Co-authored-by: Svetlana Karslioglu --- beginner_source/basics/optimization_tutorial.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/beginner_source/basics/optimization_tutorial.py b/beginner_source/basics/optimization_tutorial.py index 0fb508d1ccc..a1603510b96 100644 --- a/beginner_source/basics/optimization_tutorial.py +++ b/beginner_source/basics/optimization_tutorial.py @@ -149,6 +149,9 @@ def forward(self, x): def train_loop(dataloader, model, loss_fn, optimizer): size = len(dataloader.dataset) + # Set the model to training mode - important for batch normalization and dropout layers + # Unnecessary in this situation but added for best practices + model.train() for batch, (X, y) in enumerate(dataloader): # Compute prediction and loss pred = model(X) @@ -165,10 +168,15 @@ def train_loop(dataloader, model, loss_fn, optimizer): def test_loop(dataloader, model, loss_fn): + # Set the model to evaluation mode - important for 
batch normalization and dropout layers + # Unnecessary in this situation but added for best practices + model.eval() size = len(dataloader.dataset) num_batches = len(dataloader) test_loss, correct = 0, 0 + # Evaluating the model with torch.no_grad() ensures that no gradients are computed during test mode + # also serves to reduce unnecessary gradient computations and memory usage for tensors with requires_grad=True with torch.no_grad(): for X, y in dataloader: pred = model(X) From d41e23baf7efb3f0ce5f1839ef43b75a1ceb8aa4 Mon Sep 17 00:00:00 2001 From: Qasim Khan Date: Fri, 2 Jun 2023 03:56:59 +0500 Subject: [PATCH 21/67] Copy float_model using load_model (#2385) --- prototype_source/fx_graph_mode_ptq_static.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/prototype_source/fx_graph_mode_ptq_static.rst b/prototype_source/fx_graph_mode_ptq_static.rst index f97b1f0a5f2..091673ed2e4 100644 --- a/prototype_source/fx_graph_mode_ptq_static.rst +++ b/prototype_source/fx_graph_mode_ptq_static.rst @@ -214,9 +214,9 @@ Download the `torchvision resnet18 model Date: Thu, 1 Jun 2023 23:04:55 -0400 Subject: [PATCH 22/67] resolve issue 1818 by modifying mean and standard deviation in the transforms.Normalize (#2405) * Fixes #2083 - explain model.eval, torch.no_grad * set norm to mean & std of CIFAR10(pytorch#1818) --------- Co-authored-by: Svetlana Karslioglu --- beginner_source/introyt/introyt1_tutorial.py | 23 ++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/beginner_source/introyt/introyt1_tutorial.py b/beginner_source/introyt/introyt1_tutorial.py index f52c3902c03..a5d65bcab16 100644 --- a/beginner_source/introyt/introyt1_tutorial.py +++ b/beginner_source/introyt/introyt1_tutorial.py @@ -288,7 +288,7 @@ def num_flat_features(self, x): transform = transforms.Compose( [transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))]) ########################################################################## @@ -297,9 +297,28 @@ def num_flat_features(self, x): # - ``transforms.ToTensor()`` converts images loaded by Pillow into # PyTorch tensors. # - ``transforms.Normalize()`` adjusts the values of the tensor so -# that their average is zero and their standard deviation is 0.5. Most +# that their average is zero and their standard deviation is 1.0. Most # activation functions have their strongest gradients around x = 0, so # centering our data there can speed learning. +# The values passed to the transform are the means (first tuple) and the +# standard deviations (second tuple) of the rgb values of the images in +# the dataset. You can calculate these values yourself by running these +# few lines of code: +# ``` +# from torch.utils.data import ConcatDataset +# transform = transforms.Compose([transforms.ToTensor()]) +# trainset = torchvision.datasets.CIFAR10(root='./data', train=True, +# download=True, transform=transform) +# +# #stack all train images together into a tensor of shape +# #(50000, 3, 32, 32) +# x = torch.stack([sample[0] for sample in ConcatDataset([trainset])]) +# +# #get the mean of each channel +# mean = torch.mean(x, dim=(0,2,3)) #tensor([0.4914, 0.4822, 0.4465]) +# std = torch.std(x, dim=(0,2,3)) #tensor([0.2470, 0.2435, 0.2616]) +# +# ``` # # There are many more transforms available, including cropping, centering, # rotation, and reflection. 
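As a side note on the statistics introduced above: a quick standalone sanity check of what reducing over dims ``(0, 2, 3)`` does, and of the effect of normalizing with a dataset's own statistics. A random tensor stands in for the stacked CIFAR10 images here, so the values are illustrative rather than the real CIFAR10 numbers:

    import torch

    # Stand-in for the stacked dataset tensor of shape (N, 3, 32, 32).
    x = torch.rand(1000, 3, 32, 32)

    # Reducing over batch, height and width leaves one statistic per channel.
    mean = x.mean(dim=(0, 2, 3))   # shape (3,)
    std = x.std(dim=(0, 2, 3))     # shape (3,)

    # Normalizing with these values gives roughly zero mean and unit std per
    # channel, which is what transforms.Normalize(mean, std) does to each image.
    x_norm = (x - mean[None, :, None, None]) / std[None, :, None, None]
    print(x_norm.mean(dim=(0, 2, 3)))  # close to 0 for every channel
    print(x_norm.std(dim=(0, 2, 3)))   # close to 1 for every channel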
From 64dc7022385a579a3afa809f2a44b1ccee1eaa27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Luis=20Castro=20Garc=C3=ADa?= <81191337+JoseLuisC99@users.noreply.github.com> Date: Fri, 2 Jun 2023 08:18:16 -0600 Subject: [PATCH 23/67] Replace usage of copy.deepcopy() in Computer Vision Transfer Learning Tutorial, resolves issue #2332 (#2404) --- beginner_source/transfer_learning_tutorial.py | 128 +++++++++--------- 1 file changed, 66 insertions(+), 62 deletions(-) diff --git a/beginner_source/transfer_learning_tutorial.py b/beginner_source/transfer_learning_tutorial.py index b4460bb4fb2..b09efc11749 100644 --- a/beginner_source/transfer_learning_tutorial.py +++ b/beginner_source/transfer_learning_tutorial.py @@ -46,7 +46,7 @@ import matplotlib.pyplot as plt import time import os -import copy +from tempfile import TemporaryDirectory cudnn.benchmark = True plt.ion() # interactive mode @@ -146,67 +146,71 @@ def imshow(inp, title=None): def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() - best_model_wts = copy.deepcopy(model.state_dict()) - best_acc = 0.0 - - for epoch in range(num_epochs): - print(f'Epoch {epoch}/{num_epochs - 1}') - print('-' * 10) - - # Each epoch has a training and validation phase - for phase in ['train', 'val']: - if phase == 'train': - model.train() # Set model to training mode - else: - model.eval() # Set model to evaluate mode - - running_loss = 0.0 - running_corrects = 0 - - # Iterate over data. - for inputs, labels in dataloaders[phase]: - inputs = inputs.to(device) - labels = labels.to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward - # track history if only in train - with torch.set_grad_enabled(phase == 'train'): - outputs = model(inputs) - _, preds = torch.max(outputs, 1) - loss = criterion(outputs, labels) - - # backward + optimize only if in training phase - if phase == 'train': - loss.backward() - optimizer.step() - - # statistics - running_loss += loss.item() * inputs.size(0) - running_corrects += torch.sum(preds == labels.data) - if phase == 'train': - scheduler.step() - - epoch_loss = running_loss / dataset_sizes[phase] - epoch_acc = running_corrects.double() / dataset_sizes[phase] - - print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}') - - # deep copy the model - if phase == 'val' and epoch_acc > best_acc: - best_acc = epoch_acc - best_model_wts = copy.deepcopy(model.state_dict()) - - print() - - time_elapsed = time.time() - since - print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s') - print(f'Best val Acc: {best_acc:4f}') - - # load best model weights - model.load_state_dict(best_model_wts) + # Create a temporary directory to save training checkpoints + with TemporaryDirectory() as tempdir: + best_model_params_path = os.path.join(tempdir, 'best_model_params.pt') + + torch.save(model.state_dict(), best_model_params_path) + best_acc = 0.0 + + for epoch in range(num_epochs): + print(f'Epoch {epoch}/{num_epochs - 1}') + print('-' * 10) + + # Each epoch has a training and validation phase + for phase in ['train', 'val']: + if phase == 'train': + model.train() # Set model to training mode + else: + model.eval() # Set model to evaluate mode + + running_loss = 0.0 + running_corrects = 0 + + # Iterate over data. 
+ for inputs, labels in dataloaders[phase]: + inputs = inputs.to(device) + labels = labels.to(device) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + # track history if only in train + with torch.set_grad_enabled(phase == 'train'): + outputs = model(inputs) + _, preds = torch.max(outputs, 1) + loss = criterion(outputs, labels) + + # backward + optimize only if in training phase + if phase == 'train': + loss.backward() + optimizer.step() + + # statistics + running_loss += loss.item() * inputs.size(0) + running_corrects += torch.sum(preds == labels.data) + if phase == 'train': + scheduler.step() + + epoch_loss = running_loss / dataset_sizes[phase] + epoch_acc = running_corrects.double() / dataset_sizes[phase] + + print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}') + + # deep copy the model + if phase == 'val' and epoch_acc > best_acc: + best_acc = epoch_acc + torch.save(model.state_dict(), best_model_params_path) + + print() + + time_elapsed = time.time() - since + print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s') + print(f'Best val Acc: {best_acc:4f}') + + # load best model weights + model.load_state_dict(torch.load(best_model_params_path)) return model From 5b804b84f1877ce6ac13ebfb8c15d114c0e5743a Mon Sep 17 00:00:00 2001 From: arunppsg Date: Fri, 2 Jun 2023 20:06:06 +0530 Subject: [PATCH 24/67] fix cropping to include last column and last row (#2384) --- beginner_source/data_loading_tutorial.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beginner_source/data_loading_tutorial.py b/beginner_source/data_loading_tutorial.py index 322d9b3009c..3afb7dffa84 100644 --- a/beginner_source/data_loading_tutorial.py +++ b/beginner_source/data_loading_tutorial.py @@ -268,8 +268,8 @@ def __call__(self, sample): h, w = image.shape[:2] new_h, new_w = self.output_size - top = np.random.randint(0, h - new_h) - left = np.random.randint(0, w - new_w) + top = np.random.randint(0, h - new_h + 1) + left = np.random.randint(0, w - new_w + 1) image = image[top: top + new_h, left: left + new_w] @@ -294,7 +294,7 @@ def __call__(self, sample): ###################################################################### # .. note:: -# In the example above, `RandomCrop` uses an external library's random number generator +# In the example above, `RandomCrop` uses an external library's random number generator # (in this case, Numpy's `np.random.int`). This can result in unexpected behavior with `DataLoader` # (see `here `_). # In practice, it is safer to stick to PyTorch's random number generator, e.g. by using `torch.randint` instead. 
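The ``+ 1`` above matters because ``np.random.randint`` excludes its upper bound; a small standalone illustration (the image and crop sizes here are arbitrary):

    import numpy as np

    h, new_h = 32, 28   # illustrative image height and crop height
    # Valid top offsets are 0 .. h - new_h inclusive (0..4 here), so a crop can
    # also start at the last valid row. randint's high bound is exclusive, so
    # without the + 1 the offset 4, and with it the last row and column of the
    # image, could never appear in any crop.
    without_fix = np.random.randint(0, h - new_h, size=10000)
    with_fix = np.random.randint(0, h - new_h + 1, size=10000)
    print(without_fix.max())  # at most 3
    print(with_fix.max())     # reaches 4, so crops can cover the bottom/right edge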
From fd9a6a7f5fff58b00a0b83c13c96a70cbf513f98 Mon Sep 17 00:00:00 2001 From: Mateusz Nowak <37732935+noqqaqq@users.noreply.github.com> Date: Fri, 2 Jun 2023 17:45:11 +0200 Subject: [PATCH 25/67] Enumerate over dataset instead of simple loop (#2407) Co-authored-by: noqqaqq Co-authored-by: Nicolas Hug --- beginner_source/data_loading_tutorial.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/beginner_source/data_loading_tutorial.py b/beginner_source/data_loading_tutorial.py index 3afb7dffa84..d5326f6e9a6 100644 --- a/beginner_source/data_loading_tutorial.py +++ b/beginner_source/data_loading_tutorial.py @@ -165,9 +165,7 @@ def __getitem__(self, idx): fig = plt.figure() -for i in range(len(face_dataset)): - sample = face_dataset[i] - +for i, sample in enumerate(face_dataset): print(i, sample['image'].shape, sample['landmarks'].shape) ax = plt.subplot(1, 4, i + 1) @@ -356,9 +354,7 @@ def __call__(self, sample): ToTensor() ])) -for i in range(len(transformed_dataset)): - sample = transformed_dataset[i] - +for i, sample in enumerate(transformed_dataset): print(i, sample['image'].size(), sample['landmarks'].size()) if i == 3: From b966c1fc9ff17eb0da60b5c7546a83589e22831d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Luis=20Castro=20Garc=C3=ADa?= <81191337+JoseLuisC99@users.noreply.github.com> Date: Fri, 2 Jun 2023 10:33:11 -0600 Subject: [PATCH 26/67] Implement function for BERT quantization tutorial, resolves issue #1971 (#2403) Co-authored-by: Carl Parker --- .../dynamic_quantization_bert_tutorial.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/intermediate_source/dynamic_quantization_bert_tutorial.rst b/intermediate_source/dynamic_quantization_bert_tutorial.rst index 53ac2cd0afb..39cff5a22c5 100644 --- a/intermediate_source/dynamic_quantization_bert_tutorial.rst +++ b/intermediate_source/dynamic_quantization_bert_tutorial.rst @@ -255,6 +255,9 @@ model before and after the dynamic quantization. torch.manual_seed(seed) set_seed(42) + # Initialize a global random number generator + global_rng = random.Random() + 2.2 Load the fine-tuned BERT model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -525,6 +528,21 @@ We can serialize and save the quantized model for the future use using .. 
code:: python + def ids_tensor(shape, vocab_size, rng=None, name=None): + # Creates a random int32 tensor of the shape within the vocab size + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.randint(0, vocab_size - 1)) + + return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous() + input_ids = ids_tensor([8, 128], 2) token_type_ids = ids_tensor([8, 128], 2) attention_mask = ids_tensor([8, 128], vocab_size=2) From 769cff98ea01d67b9f82910c4be2e095b31deb46 Mon Sep 17 00:00:00 2001 From: Alok Kumar Jha <92216931+akjalok@users.noreply.github.com> Date: Fri, 2 Jun 2023 22:40:07 +0530 Subject: [PATCH 27/67] Fix the loss initialization in intermediate_source/char_rnn_generation_tutorial.py (#2380) * changed the loss init to make it less confusing --------- Co-authored-by: Nicolas Hug Co-authored-by: Svetlana Karslioglu --- intermediate_source/char_rnn_generation_tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intermediate_source/char_rnn_generation_tutorial.py b/intermediate_source/char_rnn_generation_tutorial.py index 6068c84cd0e..d0c1c553865 100644 --- a/intermediate_source/char_rnn_generation_tutorial.py +++ b/intermediate_source/char_rnn_generation_tutorial.py @@ -278,7 +278,7 @@ def train(category_tensor, input_line_tensor, target_line_tensor): rnn.zero_grad() - loss = 0 + loss = torch.Tensor([0]) # you can also just simply use ``loss = 0`` for i in range(input_line_tensor.size(0)): output, hidden = rnn(category_tensor, input_line_tensor[i], hidden) From 83cbc8de29a9ad40aaffb782206a316f8966a257 Mon Sep 17 00:00:00 2001 From: Youshaa Murhij Date: Fri, 2 Jun 2023 20:19:13 +0300 Subject: [PATCH 28/67] Update transformer_tutorial.py | Resolving issue #1778 (#2402) * Update transformer_tutorial.py Add description for positional encoding calculation for Transformers * Update Positional Encoding description in transformer_tutorial.py * Update transformer_tutorial.py --------- Co-authored-by: Carl Parker --- beginner_source/transformer_tutorial.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index cce52eefdb3..5ed9a0d1390 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -103,6 +103,15 @@ def generate_square_subsequent_mask(sz: int) -> Tensor: # positional encodings have the same dimension as the embeddings so that # the two can be summed. Here, we use ``sine`` and ``cosine`` functions of # different frequencies. +# The ``div_term`` in the code is calculated as +# ``torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))``. +# This calculation is based on the original Transformer paper’s formulation +# for positional encoding. The purpose of this calculation is to create +# a range of values that decrease exponentially. +# This allows the model to learn to attend to positions based on their relative distances. +# The ``math.log(10000.0)`` term in the exponent represents the maximum effective +# input length (in this case, ``10000``). Dividing this term by ``d_model`` scales +# the values to be within a reasonable range for the exponential function. 
# class PositionalEncoding(nn.Module): From 420037e77a0d3dd8fc7952d48c7be2c591b4e625 Mon Sep 17 00:00:00 2001 From: TheMemoryDealer <32904619+TheMemoryDealer@users.noreply.github.com> Date: Fri, 2 Jun 2023 21:27:30 +0100 Subject: [PATCH 29/67] Fix run_demo(demo_model_parallel, world_size) issue (#2367) In the function demo_model_parallel, dev0 and dev1 are computed in a way that assigns two distinct GPUs to each process. This is achieved by doubling the rank and applying modulus operation with twice the world_size. Assuming 8 gpus world_size is set to 4, leading to the creation of 4 processes. Each of these processes is allocated two distinct GPUs. For instance, the first process (process 0) is assigned GPUs 0 and 1, the second process (process 1) is assigned GPUs 2 and 3, and so forth. --- intermediate_source/ddp_tutorial.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/intermediate_source/ddp_tutorial.rst b/intermediate_source/ddp_tutorial.rst index 366db8db130..a8955569df5 100644 --- a/intermediate_source/ddp_tutorial.rst +++ b/intermediate_source/ddp_tutorial.rst @@ -269,8 +269,8 @@ either the application or the model ``forward()`` method. setup(rank, world_size) # setup mp_model and devices for this process - dev0 = (rank * 2) % world_size - dev1 = (rank * 2 + 1) % world_size + dev0 = rank * 2 + dev1 = rank * 2 + 1 mp_model = ToyMpModel(dev0, dev1) ddp_mp_model = DDP(mp_model) @@ -293,6 +293,7 @@ either the application or the model ``forward()`` method. world_size = n_gpus run_demo(demo_basic, world_size) run_demo(demo_checkpoint, world_size) + world_size = n_gpus//2 run_demo(demo_model_parallel, world_size) Initialize DDP with torch.distributed.run/torchrun From 4648254675e467170e17374a4bf954291e3ce819 Mon Sep 17 00:00:00 2001 From: Qasim Khan Date: Sat, 3 Jun 2023 01:28:34 +0500 Subject: [PATCH 30/67] Fix dependencies and kernel crash in captumyt.py (#2408) * Update captum dependencies (matplotlib and flask-compress) * Use resnet18 due to RAM limitation Google Colab crashes due to insufficient RAM (more than 12 GB is required) if resnet101 or resnet50 are used. Thus, resnet18 is used instead (approximately 6 GB is used). 
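For context on the backbone switch, a rough standalone comparison of the two models' sizes (weights only; the attribution methods also keep large intermediate buffers, so this is only a proxy for the Colab RAM figures quoted above):

    from torchvision import models

    for name in ("resnet18", "resnet101"):
        model = getattr(models, name)(weights=None)  # build without downloading weights
        n_params = sum(p.numel() for p in model.parameters())
        print(f"{name}: {n_params / 1e6:.1f}M parameters, "
              f"~{n_params * 4 / 1024**2:.0f} MB as float32")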
--- beginner_source/introyt/captumyt.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/beginner_source/introyt/captumyt.py b/beginner_source/introyt/captumyt.py index 2ff8e9e70b1..cf63b6109b6 100644 --- a/beginner_source/introyt/captumyt.py +++ b/beginner_source/introyt/captumyt.py @@ -98,21 +98,24 @@ Before you get started, you need to have a Python environment with: - Python version 3.6 or higher -- For the Captum Insights example, Flask 1.1 or higher +- For the Captum Insights example, Flask 1.1 or higher and Flask-Compress + (the latest version is recommended) - PyTorch version 1.2 or higher (the latest version is recommended) - TorchVision version 0.6 or higher (the latest version is recommended) - Captum (the latest version is recommended) +- Matplotlib version 3.3.4, since Captum currently uses a Matplotlib + function whose arguments have been renamed in later versions To install Captum in an Anaconda or pip virtual environment, use the appropriate command for your environment below: With ``conda``:: - conda install pytorch torchvision captum -c pytorch + conda install pytorch torchvision captum flask-compress matplotlib=3.3.4 -c pytorch With ``pip``:: - pip install torch torchvision captum + pip install torch torchvision captum matplotlib==3.3.4 Flask-Compress Restart this notebook in the environment you set up, and you’re ready to go! @@ -155,7 +158,7 @@ # now. # -model = models.resnet101(weights='IMAGENET1K_V1') +model = models.resnet18(weights='IMAGENET1K_V1') model = model.eval() From fa9be972d2d7c4953c3927365a033f9de74cfbfb Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Fri, 2 Jun 2023 14:45:07 -0700 Subject: [PATCH 31/67] Set global device back to cpu at the end of tutorial (#2411) We are using sphinx to render those tutorials, which does not start a new process to render, so one needs to restore global state to default value, by calling `torch.set_default_device('cpu')` Co-authored-by: Svetlana Karslioglu --- recipes_source/recipes/changing_default_device.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/recipes_source/recipes/changing_default_device.py b/recipes_source/recipes/changing_default_device.py index 103560fd743..f5e50b3f0be 100644 --- a/recipes_source/recipes/changing_default_device.py +++ b/recipes_source/recipes/changing_default_device.py @@ -43,6 +43,9 @@ print(mod.weight.device) print(mod(torch.randn(128, 20)).device) +# And then globally return it back to CPU +torch.set_default_device('cpu') + ################################################################ # This function imposes a slight performance cost on every Python # call to the torch API (not just factory functions). If this From 3a58c5197c343f8adf39ed4ec3f3bd0e666f3a6a Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Fri, 2 Jun 2023 15:07:09 -0700 Subject: [PATCH 32/67] Revert "Update transformer_tutorial.py | Resolving issue #1778 (#2402)" (#2412) This reverts commit 83cbc8de29a9ad40aaffb782206a316f8966a257. --- beginner_source/transformer_tutorial.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index 5ed9a0d1390..cce52eefdb3 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -103,15 +103,6 @@ def generate_square_subsequent_mask(sz: int) -> Tensor: # positional encodings have the same dimension as the embeddings so that # the two can be summed. 
Here, we use ``sine`` and ``cosine`` functions of # different frequencies. -# The ``div_term`` in the code is calculated as -# ``torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))``. -# This calculation is based on the original Transformer paper’s formulation -# for positional encoding. The purpose of this calculation is to create -# a range of values that decrease exponentially. -# This allows the model to learn to attend to positions based on their relative distances. -# The ``math.log(10000.0)`` term in the exponent represents the maximum effective -# input length (in this case, ``10000``). Dividing this term by ``d_model`` scales -# the values to be within a reasonable range for the exponential function. # class PositionalEncoding(nn.Module): From 9e001571138e4fee947934e8f8f24b92ee669ddb Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Fri, 2 Jun 2023 16:33:53 -0700 Subject: [PATCH 33/67] Make DCGan tutorial results reproducible (#2414) By using deterministic algorithm That should prevent repo size increase by 70Mb after every commit, but will make tutorial slightly slower (though not significantly) Also, remove unused/absolete imports --- beginner_source/dcgan_faces_tutorial.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beginner_source/dcgan_faces_tutorial.py b/beginner_source/dcgan_faces_tutorial.py index d98683741e5..1a1f9c38606 100644 --- a/beginner_source/dcgan_faces_tutorial.py +++ b/beginner_source/dcgan_faces_tutorial.py @@ -112,7 +112,6 @@ # will be explained in the coming sections. # -from __future__ import print_function #%matplotlib inline import argparse import os @@ -120,7 +119,6 @@ import torch import torch.nn as nn import torch.nn.parallel -import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data import torchvision.datasets as dset @@ -137,6 +135,7 @@ print("Random Seed: ", manualSeed) random.seed(manualSeed) torch.manual_seed(manualSeed) +torch.use_deterministic_algorithms(True) # Needed for reproducible results ###################################################################### From f1cb62c9cacc6d0f781dbca6b4c27007b6de42da Mon Sep 17 00:00:00 2001 From: BJ Hargrave Date: Mon, 5 Jun 2023 12:22:42 -0400 Subject: [PATCH 34/67] Remove improper src_mask from encoder tutorial (#2423) Fixes https://github.com/pytorch/tutorials/issues/1877 The tutorial is using a transformer encoder and the mask used was for masking a decoder which is not part of the tutorial. The mask is removed. Some variable names are changed to better reflect the purpose of the variable. Also, some unused imports are removed. Signed-off-by: BJ Hargrave --- beginner_source/transformer_tutorial.py | 45 ++++++++----------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index cce52eefdb3..a3fc3ab16eb 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -36,12 +36,8 @@ # of the word (see the next paragraph for more details). The # ``nn.TransformerEncoder`` consists of multiple layers of # `nn.TransformerEncoderLayer `__. -# Along with the input sequence, a square attention mask is required because the -# self-attention layers in ``nn.TransformerDecoder`` are only allowed to attend -# the earlier positions in the sequence. For the language modeling task, any -# tokens on the future positions should be masked. 
To produce a probability -# distribution over output words, the output of the ``nn.TransformerEncoder`` -# model is passed through a linear layer followed by a log-softmax function. +# To produce a probability distribution over output words, the output of +# the ``nn.TransformerEncoder`` model is passed through a linear layer. # import math @@ -51,7 +47,6 @@ import torch from torch import nn, Tensor -import torch.nn.functional as F from torch.nn import TransformerEncoder, TransformerEncoderLayer from torch.utils.data import dataset @@ -64,19 +59,19 @@ def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int, self.pos_encoder = PositionalEncoding(d_model, dropout) encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout) self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) - self.encoder = nn.Embedding(ntoken, d_model) + self.embedding = nn.Embedding(ntoken, d_model) self.d_model = d_model - self.decoder = nn.Linear(d_model, ntoken) + self.linear = nn.Linear(d_model, ntoken) self.init_weights() def init_weights(self) -> None: initrange = 0.1 - self.encoder.weight.data.uniform_(-initrange, initrange) - self.decoder.bias.data.zero_() - self.decoder.weight.data.uniform_(-initrange, initrange) + self.embedding.weight.data.uniform_(-initrange, initrange) + self.linear.bias.data.zero_() + self.linear.weight.data.uniform_(-initrange, initrange) - def forward(self, src: Tensor, src_mask: Tensor) -> Tensor: + def forward(self, src: Tensor, src_mask: Tensor = None) -> Tensor: """ Arguments: src: Tensor, shape ``[seq_len, batch_size]`` @@ -85,18 +80,13 @@ def forward(self, src: Tensor, src_mask: Tensor) -> Tensor: Returns: output Tensor of shape ``[seq_len, batch_size, ntoken]`` """ - src = self.encoder(src) * math.sqrt(self.d_model) + src = self.embedding(src) * math.sqrt(self.d_model) src = self.pos_encoder(src) output = self.transformer_encoder(src, src_mask) - output = self.decoder(output) + output = self.linear(output) return output -def generate_square_subsequent_mask(sz: int) -> Tensor: - """Generates an upper-triangular matrix of ``-inf``, with zeros on ``diag``.""" - return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1) - - ###################################################################### # ``PositionalEncoding`` module injects some information about the # relative or absolute position of the tokens in the sequence. The @@ -286,7 +276,6 @@ def get_batch(source: Tensor, i: int) -> Tuple[Tensor, Tensor]: # to prevent gradients from exploding. # -import copy import time criterion = nn.CrossEntropyLoss() @@ -299,16 +288,13 @@ def train(model: nn.Module) -> None: total_loss = 0. log_interval = 200 start_time = time.time() - src_mask = generate_square_subsequent_mask(bptt).to(device) num_batches = len(train_data) // bptt for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)): data, targets = get_batch(train_data, i) - seq_len = data.size(0) - if seq_len != bptt: # only on last batch - src_mask = src_mask[:seq_len, :seq_len] - output = model(data, src_mask) - loss = criterion(output.view(-1, ntokens), targets) + output = model(data) + output_flat = output.view(-1, ntokens) + loss = criterion(output_flat, targets) optimizer.zero_grad() loss.backward() @@ -330,14 +316,11 @@ def train(model: nn.Module) -> None: def evaluate(model: nn.Module, eval_data: Tensor) -> float: model.eval() # turn on evaluation mode total_loss = 0. 
- src_mask = generate_square_subsequent_mask(bptt).to(device) with torch.no_grad(): for i in range(0, eval_data.size(0) - 1, bptt): data, targets = get_batch(eval_data, i) seq_len = data.size(0) - if seq_len != bptt: - src_mask = src_mask[:seq_len, :seq_len] - output = model(data, src_mask) + output = model(data) output_flat = output.view(-1, ntokens) total_loss += seq_len * criterion(output_flat, targets).item() return total_loss / (len(eval_data) - 1) From 47b9ea4628764a19e867d8a0227a81fc2138c0bc Mon Sep 17 00:00:00 2001 From: clee2000 <44682903+clee2000@users.noreply.github.com> Date: Mon, 5 Jun 2023 11:26:48 -0700 Subject: [PATCH 35/67] Move from CircleCI to GHA (#2280) TODO: - Increase number of runner and see if it reduces the build time - Upload previews to the docs bucket --- .circleci/config.yml | 366 +------------------------- .circleci/config.yml.in | 213 --------------- .circleci/regenerate.py | 112 -------- .github/workflows/build-tutorials.yml | 182 +++++++++++++ .jenkins/build.sh | 38 +-- .jenkins/get_files_to_run.py | 8 +- 6 files changed, 203 insertions(+), 716 deletions(-) delete mode 100644 .circleci/config.yml.in delete mode 100644 .circleci/regenerate.py create mode 100644 .github/workflows/build-tutorials.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index b7084096c4b..70b2c7fd5b0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -306,6 +306,10 @@ jobs: workflows: build: + when: + and: # All must be true to trigger + - equal: [ branch1, << pipeline.git.branch >> ] + - equal: [ branch2, << pipeline.git.branch >> ] jobs: # Build jobs that only run on PR - pytorch_tutorial_pr_build_worker_0: @@ -314,365 +318,3 @@ workflows: ignore: - master - main - - pytorch_tutorial_pr_build_worker_1: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_2: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_3: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_4: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_5: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_6: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_7: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_8: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_9: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_10: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_11: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_12: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_13: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_14: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_15: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_16: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_17: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_18: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_worker_19: - filters: - branches: - ignore: - - master - - main - - pytorch_tutorial_pr_build_manager: - filters: - 
branches: - ignore: - - master - - main - requires: - - pytorch_tutorial_pr_build_worker_0 - - pytorch_tutorial_pr_build_worker_1 - - pytorch_tutorial_pr_build_worker_2 - - pytorch_tutorial_pr_build_worker_3 - - pytorch_tutorial_pr_build_worker_4 - - pytorch_tutorial_pr_build_worker_5 - - pytorch_tutorial_pr_build_worker_6 - - pytorch_tutorial_pr_build_worker_7 - - pytorch_tutorial_pr_build_worker_8 - - pytorch_tutorial_pr_build_worker_9 - - pytorch_tutorial_pr_build_worker_10 - - pytorch_tutorial_pr_build_worker_11 - - pytorch_tutorial_pr_build_worker_12 - - pytorch_tutorial_pr_build_worker_13 - - pytorch_tutorial_pr_build_worker_14 - - pytorch_tutorial_pr_build_worker_15 - - pytorch_tutorial_pr_build_worker_16 - - pytorch_tutorial_pr_build_worker_17 - - pytorch_tutorial_pr_build_worker_18 - - pytorch_tutorial_pr_build_worker_19 - # Build jobs that only run on trunk - - pytorch_tutorial_trunk_build_worker_0: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_1: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_2: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_3: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_4: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_5: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_6: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_7: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_8: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_9: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_10: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_11: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_12: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_13: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_14: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_15: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_16: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_17: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_18: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_worker_19: - context: org-member - filters: - branches: - only: - - master - - main - - pytorch_tutorial_trunk_build_manager: - context: org-member - filters: - branches: - only: - - master - - main - requires: - - pytorch_tutorial_trunk_build_worker_0 - - pytorch_tutorial_trunk_build_worker_1 - - pytorch_tutorial_trunk_build_worker_2 - - pytorch_tutorial_trunk_build_worker_3 - - pytorch_tutorial_trunk_build_worker_4 - - 
pytorch_tutorial_trunk_build_worker_5 - - pytorch_tutorial_trunk_build_worker_6 - - pytorch_tutorial_trunk_build_worker_7 - - pytorch_tutorial_trunk_build_worker_8 - - pytorch_tutorial_trunk_build_worker_9 - - pytorch_tutorial_trunk_build_worker_10 - - pytorch_tutorial_trunk_build_worker_11 - - pytorch_tutorial_trunk_build_worker_12 - - pytorch_tutorial_trunk_build_worker_13 - - pytorch_tutorial_trunk_build_worker_14 - - pytorch_tutorial_trunk_build_worker_15 - - pytorch_tutorial_trunk_build_worker_16 - - pytorch_tutorial_trunk_build_worker_17 - - pytorch_tutorial_trunk_build_worker_18 - - pytorch_tutorial_trunk_build_worker_19 -# - pytorch_tutorial_windows_pr_build_worker_0: -# filters: -# branches: -# ignore: -# - master -# - main -# - pytorch_tutorial_windows_pr_build_worker_1: -# filters: -# branches: -# ignore: -# - master -# - main -# - pytorch_tutorial_windows_pr_build_worker_2: -# filters: -# branches: -# ignore: -# - master -# - main -# - pytorch_tutorial_windows_pr_build_worker_3: -# filters: -# branches: -# ignore: -# - master -# - main -# - pytorch_tutorial_windows_trunk_build_worker_0: -# context: org-member -# filters: -# branches: -# only: -# - master -# - main -# - pytorch_tutorial_windows_trunk_build_worker_1: -# context: org-member -# filters: -# branches: -# only: -# - master -# - main -# - pytorch_tutorial_windows_trunk_build_worker_2: -# context: org-member -# filters: -# branches: -# only: -# - master -# - main -# - pytorch_tutorial_windows_trunk_build_worker_3: -# context: org-member -# filters: -# branches: -# only: -# - master -# - main diff --git a/.circleci/config.yml.in b/.circleci/config.yml.in deleted file mode 100644 index 0694d221aad..00000000000 --- a/.circleci/config.yml.in +++ /dev/null @@ -1,213 +0,0 @@ -# run python regenerate.py to generate config.yml from config.yml.in - -version: 2.1 - -executors: - windows-with-nvidia-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -install_official_git_client: &install_official_git_client - name: Install Official Git Client - no_output_timeout: "1h" - command: | - set -e - sudo apt-get -qq update - sudo apt-get -qq install openssh-client git - -# This system setup script is meant to run before the CI-related scripts, e.g., -# installing Git client, checking out code, setting up CI env, and -# building/testing. -setup_linux_system_environment: &setup_linux_system_environment - name: Set Up System Environment - no_output_timeout: "1h" - command: | - set -ex - - # Stop background apt updates. Hypothetically, the kill should not - # be necessary, because stop is supposed to send a kill signal to - # the process, but we've added it for good luck. Also - # hypothetically, it's supposed to be unnecessary to wait for - # the process to block. We also have that line for good luck. - # If you like, try deleting them and seeing if it works. 
- sudo systemctl stop apt-daily.service || true - sudo systemctl kill --kill-who=all apt-daily.service || true - - sudo systemctl stop unattended-upgrades.service || true - sudo systemctl kill --kill-who=all unattended-upgrades.service || true - - # wait until `apt-get update` has been killed - while systemctl is-active --quiet apt-daily.service - do - sleep 1; - done - while systemctl is-active --quiet unattended-upgrades.service - do - sleep 1; - done - - # See if we actually were successful - systemctl list-units --all | cat - - sudo apt-get purge -y unattended-upgrades - - cat /etc/apt/sources.list - - ps auxfww | grep [a]pt - ps auxfww | grep dpkg - -pytorch_tutorial_build_defaults: &pytorch_tutorial_build_defaults - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - steps: - - checkout - - run: - <<: *setup_linux_system_environment - - run: - name: Set Up CI Environment - no_output_timeout: "1h" - command: | - set -e - - sudo apt-get -y update - sudo apt-get -y install expect-dev moreutils - - sudo pip3 -q install awscli==1.16.35 - - if [ -n "${CUDA_VERSION}" ]; then - nvidia-smi - fi - - # This IAM user only allows read-write access to ECR - export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_ONLY} - export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_ONLY} - eval $(aws ecr get-login --region us-east-1 --no-include-email) - - run: - name: Build - no_output_timeout: "20h" - command: | - set -e - - # for some reason, pip installs it in a different place than what is looked at in the py file - sudo pip3 install requests --target=/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages - export pyTorchDockerImageTag=$(python3 .jenkins/get_docker_tag.py) - echo "PyTorchDockerImageTag: "${pyTorchDockerImageTag} - - cat >/home/circleci/project/ci_build_script.sh \</dev/null - if [ -n "${CUDA_VERSION}" ]; then - export id=$(docker run --gpus all -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}) - else - export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}) - fi - - echo "declare -x JOB_BASE_NAME=${CIRCLE_JOB}" > /home/circleci/project/env - echo "declare -x COMMIT_ID=${CIRCLE_SHA1}" >> /home/circleci/project/env - echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH}" >> /home/circleci/project/env - # DANGER! DO NOT REMOVE THE `set +x` SETTING HERE! 
- set +x - if [[ "$CIRCLE_BRANCH" == master || "$CIRCLE_BRANCH" == main ]]; then - if [ -z "${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" ]; then exit 1; fi - if [ -z "${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" ]; then exit 1; fi - if [ -z "${GITHUB_PYTORCHBOT_USERNAME}" ]; then exit 1; fi - if [ -z "${GITHUB_PYTORCHBOT_TOKEN}" ]; then exit 1; fi - - echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" >> /home/circleci/project/env - echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" >> /home/circleci/project/env - echo "declare -x GITHUB_PYTORCHBOT_USERNAME=${GITHUB_PYTORCHBOT_USERNAME}" >> /home/circleci/project/env - echo "declare -x GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" >> /home/circleci/project/env - else - echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_PR_S3_BUCKET}" >> /home/circleci/project/env - echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_PR_S3_BUCKET}" >> /home/circleci/project/env - fi - set -x - - echo 'rm /opt/cache/bin/*' | docker exec -u root -i "$id" bash - docker cp /home/circleci/project/. "$id:/var/lib/jenkins/workspace" - - export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_build_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' - echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts - # Copy docs with plot to a docs dir - if docker exec -it "$id" sh -c "test -d ./workspace/docs_with_plot/docs/"; then - mkdir /home/circleci/project/docs - docker cp "$id:/var/lib/jenkins/workspace/docs_with_plot/docs/." /home/circleci/project/docs - echo "Directory copied successfully" - else - echo "No docs_with_plot directory. Skipping..." - fi - - - store_artifacts: - path: ./docs - destination: tutorials - -pytorch_tutorial_build_worker_defaults: &pytorch_tutorial_build_worker_defaults - environment: - DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9" - CUDA_VERSION: "9" - resource_class: gpu.nvidia.small - <<: *pytorch_tutorial_build_defaults - -pytorch_tutorial_build_manager_defaults: &pytorch_tutorial_build_manager_defaults - environment: - DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9" - resource_class: medium - - - <<: *pytorch_tutorial_build_defaults -{% raw %} -pytorch_windows_build_worker: &pytorch_windows_build_worker - executor: windows-with-nvidia-gpu - steps: - - checkout - - run: - name: Install Cuda - no_output_timeout: 30m - command: | - .circleci/scripts/windows_cuda_install.sh - - run: - name: Generate cache key - # This will refresh cache on Sundays, build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - keys: - - data-{{ checksum "Makefile" }}-{{ checksum ".circleci-weekly" }} - - run: - name: test - no_output_timeout: "1h" - command: | - .circleci/scripts/build_for_windows.sh - - save_cache: - key: data-{{ checksum "Makefile" }}-{{ checksum ".circleci-weekly" }} - paths: - - advanced_source/data - - beginner_source/data - - intermediate_source/data - - prototype_source/data -{% endraw %} -jobs: - {{ jobs("pr") }} - - {{ jobs("trunk") }} - - {{ windows_jobs() }} - -workflows: - build: - jobs: - # Build jobs that only run on PR - {{ workflows_jobs("pr") }} - # Build jobs that only run on trunk - {{ workflows_jobs("trunk") }} -# {{ windows_workflows_jobs() }} diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py deleted file mode 100644 index f47ee1dfa6f..00000000000 --- a/.circleci/regenerate.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env python3 - -# regenrates config.yml based on config.yml.in - -from copy import deepcopy -import os.path - -import jinja2 -import yaml -from jinja2 import select_autoescape - -WORKFLOWS_JOBS_PR = {"filters": {"branches": {"ignore": ["master", "main"]}}} - -WORKFLOWS_JOBS_TRUNK = { - "context": "org-member", - "filters": {"branches": {"only": ["master", "main"]}}, -} - - -def indent(indentation, data_list): - return ("\n" + " " * indentation).join( - yaml.dump(data_list, default_flow_style=False).splitlines() - ) - - -def jobs(pr_or_trunk, num_workers=20, indentation=2): - jobs = {} - - # all tutorials that need gpu.nvidia.small.multi machines will be routed by - # get_files_to_run.py to 0th worker, similarly for gpu.nvidia.large and the - # 1st worker - needs_gpu_nvidia_small_multi = [0] - needs_gpu_nvidia_large = [1] - jobs[f"pytorch_tutorial_{pr_or_trunk}_build_manager"] = { - "<<": "*pytorch_tutorial_build_manager_defaults" - } - for i in range(num_workers): - job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"} - if i in needs_gpu_nvidia_small_multi: - job_info["resource_class"] = "gpu.nvidia.small.multi" - if i in needs_gpu_nvidia_large: - job_info["resource_class"] = "gpu.nvidia.large" - jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info - - return indent(indentation, jobs).replace("'", "") - - -def workflows_jobs(pr_or_trunk, indentation=6, num_workers=20): - jobs = [] - job_info = deepcopy( - WORKFLOWS_JOBS_PR if pr_or_trunk == "pr" else WORKFLOWS_JOBS_TRUNK - ) - - for i in range(num_workers): - jobs.append( - {f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}": deepcopy(job_info)} - ) - - job_info["requires"] = [ - f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}" for i in range(num_workers) - ] - jobs.append({f"pytorch_tutorial_{pr_or_trunk}_build_manager": deepcopy(job_info)}) - return indent(indentation, jobs) - - -def windows_jobs(indentation=2, num_workers=4): - jobs = {} - for i in range(num_workers): - jobs[f"pytorch_tutorial_windows_pr_build_worker_{i}"] = { - "<<": "*pytorch_windows_build_worker" - } - jobs[f"pytorch_tutorial_windows_trunk_build_worker_{i}"] = { - "<<": "*pytorch_windows_build_worker" - } - return indent(indentation, jobs).replace("'", "") - - -def windows_workflows_jobs(indentation=6, num_workers=4): - jobs = [] - job_info = WORKFLOWS_JOBS_PR - for i in range(num_workers): - jobs.append( - {f"pytorch_tutorial_windows_pr_build_worker_{i}": deepcopy(job_info)} - ) - - job_info = WORKFLOWS_JOBS_TRUNK - for i in range(num_workers): - jobs.append( - {f"pytorch_tutorial_windows_trunk_build_worker_{i}": 
deepcopy(job_info)} - ) - - return ("\n#").join(indent(indentation, jobs).splitlines()) - - -if __name__ == "__main__": - - directory = os.path.dirname(__file__) - env = jinja2.Environment( - loader=jinja2.FileSystemLoader(directory), - lstrip_blocks=True, - autoescape=select_autoescape(enabled_extensions=("html", "xml")), - keep_trailing_newline=True, - ) - with open(os.path.join(directory, "config.yml"), "w") as f: - f.write( - env.get_template("config.yml.in").render( - jobs=jobs, - workflows_jobs=workflows_jobs, - windows_jobs=windows_jobs, - windows_workflows_jobs=windows_workflows_jobs, - ) - ) diff --git a/.github/workflows/build-tutorials.yml b/.github/workflows/build-tutorials.yml new file mode 100644 index 00000000000..222de26b9bb --- /dev/null +++ b/.github/workflows/build-tutorials.yml @@ -0,0 +1,182 @@ +name: Build tutorials + +on: + pull_request: + push: + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} + cancel-in-progress: true + +jobs: + worker: + name: pytorch_tutorial_build_worker + strategy: + matrix: + include: + - { shard: 1, num_shards: 6, runner: "linux.16xlarge.nvidia.gpu" } + - { shard: 2, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" } + - { shard: 3, num_shards: 6, runner: "linux.4xlarge.nvidia.gpu" } + - { shard: 4, num_shards: 6, runner: "linux.4xlarge.nvidia.gpu" } + - { shard: 5, num_shards: 6, runner: "linux.4xlarge.nvidia.gpu" } + - { shard: 6, num_shards: 6, runner: "linux.4xlarge.nvidia.gpu" } + fail-fast: false + runs-on: ${{ matrix.runner }} + env: + DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9" + CUDA_VERSION: "9" + steps: + - name: Setup SSH (Click me for login details) + uses: pytorch/test-infra/.github/actions/setup-ssh@main + with: + github-secret: ${{ secrets.GITHUB_TOKEN }} + instructions: | + All testing is done inside the container, to start an interactive session run: + docker exec -it $(docker container ps --format '{{.ID}}') bash + + - name: Checkout Tutorials + uses: actions/checkout@v3 + + - name: Setup Linux + uses: pytorch/pytorch/.github/actions/setup-linux@main + + - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG + uses: pytorch/test-infra/.github/actions/setup-nvidia@main + + - name: Calculate docker image + shell: bash + id: docker-image + run: | + set -ex + + # for some reason, pip installs it in a different place than what is looked at in the py file + pip3 install requests==2.26 + pyTorchDockerImageTag=$(python3 .jenkins/get_docker_tag.py) + + echo "docker-image=${DOCKER_IMAGE}:${pyTorchDockerImageTag}" >> "${GITHUB_OUTPUT}" + + - name: Pull docker image + uses: pytorch/test-infra/.github/actions/pull-docker-image@main + with: + docker-image: ${{ steps.docker-image.outputs.docker-image }} + + - name: Build + shell: bash + env: + DOCKER_IMAGE: ${{ steps.docker-image.outputs.docker-image }} + NUM_WORKERS: ${{ matrix.num_shards }} + WORKER_ID: ${{ matrix.shard }} + COMMIT_ID: ${{ github.sha }} + JOB_TYPE: worker + COMMIT_SOURCE: ${{ github.ref }} + run: | + set -ex + + chmod +x ".jenkins/build.sh" + + container_name=$(docker run \ + ${GPU_FLAG:-} \ + -e WORKER_ID \ + -e NUM_WORKERS \ + -e COMMIT_ID \ + -e JOB_TYPE \ + -e COMMIT_SOURCE \ + --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \ + --tty \ + --detach \ + --user jenkins \ + --name="${container_name}" \ + -v 
"${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \ + -w /var/lib/jenkins/workspace \ + "${DOCKER_IMAGE}" + ) + + echo "rm /opt/cache/bin/*" | docker exec -u root -i "${container_name}" bash + + docker exec -t "${container_name}" sh -c ".jenkins/build.sh" + + - name: Teardown Linux + uses: pytorch/test-infra/.github/actions/teardown-linux@main + if: always() + + manager: + name: pytorch_tutorial_build_manager + needs: worker + runs-on: [self-hosted, linux.2xlarge] + env: + DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9" + CUDA_VERSION: "9" + steps: + - name: Setup SSH (Click me for login details) + uses: pytorch/test-infra/.github/actions/setup-ssh@main + with: + github-secret: ${{ secrets.GITHUB_TOKEN }} + instructions: | + All testing is done inside the container, to start an interactive session run: + docker exec -it $(docker container ps --format '{{.ID}}') bash + + - name: Checkout Tutorials + uses: actions/checkout@v3 + + - name: Setup Linux + uses: pytorch/pytorch/.github/actions/setup-linux@main + + - name: Calculate docker image + shell: bash + id: docker-image + run: | + set -ex + + # for some reason, pip installs it in a different place than what is looked at in the py file + pip3 install requests==2.26 + pyTorchDockerImageTag=$(python3 .jenkins/get_docker_tag.py) + + echo "docker-image=${DOCKER_IMAGE}:${pyTorchDockerImageTag}" >> "${GITHUB_OUTPUT}" + + - name: Pull docker image + uses: pytorch/test-infra/.github/actions/pull-docker-image@main + with: + docker-image: ${{ steps.docker-image.outputs.docker-image }} + + - name: Build + shell: bash + env: + DOCKER_IMAGE: ${{ steps.docker-image.outputs.docker-image }} + NUM_WORKERS: 6 + WORKER_ID: ${{ matrix.shard }} + COMMIT_ID: ${{ github.sha }} + JOB_TYPE: manager + COMMIT_SOURCE: ${{ github.ref }} + GITHUB_PYTORCHBOT_TOKEN: ${{ secrets.PYTORCHBOT_TOKEN }} + run: | + set -ex + + chmod +x ".jenkins/build.sh" + + container_name=$(docker run \ + ${GPU_FLAG:-} \ + -e WORKER_ID \ + -e NUM_WORKERS \ + -e COMMIT_ID \ + -e JOB_TYPE \ + -e COMMIT_SOURCE \ + -e GITHUB_PYTORCHBOT_TOKEN \ + --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \ + --tty \ + --detach \ + --user jenkins \ + --name="${container_name}" \ + -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \ + -w /var/lib/jenkins/workspace \ + "${DOCKER_IMAGE}" + ) + + echo "rm /opt/cache/bin/*" | docker exec -u root -i "${container_name}" bash + + docker exec -t "${container_name}" sh -c ".jenkins/build.sh" + + - name: Teardown Linux + uses: pytorch/test-infra/.github/actions/teardown-linux@main + if: always() diff --git a/.jenkins/build.sh b/.jenkins/build.sh index d09b0a8782a..f13966ff84b 100755 --- a/.jenkins/build.sh +++ b/.jenkins/build.sh @@ -1,10 +1,8 @@ +#!/bin/bash + set -ex -if [[ "$COMMIT_SOURCE" == master || "$COMMIT_SOURCE" == main ]]; then - export BUCKET_NAME=pytorch-tutorial-build-master -else - export BUCKET_NAME=pytorch-tutorial-build-pull-request -fi +export BUCKET_NAME=pytorch-tutorial-build-pull-request # set locale for click dependency in spacy export LC_ALL=C.UTF-8 @@ -25,7 +23,7 @@ pip install -r $DIR/../requirements.txt # Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html # RC Link # pip uninstall -y torch torchvision torchaudio torchtext -# pip install --pre --upgrade -f https://download.pytorch.org/whl/test/cu102/torch_test.html torch torchvision torchaudio torchtext +# pip install --pre --upgrade -f 
https://download.pytorch.org/whl/test/cu102/torch_test.html torch torchvision torchaudio torchtext # pip uninstall -y torch torchvision torchaudio torchtext # pip install --pre --upgrade -f https://download.pytorch.org/whl/test/cu116/torch_test.html torch torchdata torchvision torchaudio torchtext @@ -37,8 +35,7 @@ awsv2 -i awsv2 configure set default.s3.multipart_threshold 5120MB # Decide whether to parallelize tutorial builds, based on $JOB_BASE_NAME -export NUM_WORKERS=20 -if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then +if [[ "${JOB_TYPE}" == "worker" ]]; then # Step 1: Remove runnable code from tutorials that are not supposed to be run python $DIR/remove_runnable_code.py beginner_source/aws_distributed_training_tutorial.py beginner_source/aws_distributed_training_tutorial.py || true # python $DIR/remove_runnable_code.py advanced_source/ddp_pipeline_tutorial.py advanced_source/ddp_pipeline_tutorial.py || true @@ -47,7 +44,7 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then # python $DIR/remove_runnable_code.py intermediate_source/spatial_transformer_tutorial.py intermediate_source/spatial_transformer_tutorial.py || true # Temp remove for 1.10 release. # python $DIR/remove_runnable_code.py advanced_source/neural_style_tutorial.py advanced_source/neural_style_tutorial.py || true - + # TODO: Fix bugs in these tutorials to make them runnable again # python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py || true @@ -56,7 +53,6 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then # Step 2: Keep certain tutorials based on file count, and remove runnable code in all other tutorials # IMPORTANT NOTE: We assume that each tutorial has a UNIQUE filename. - export WORKER_ID=$(echo "${JOB_BASE_NAME}" | tr -dc '0-9') FILES_TO_RUN=$(python .jenkins/get_files_to_run.py) echo "FILES_TO_RUN: " ${FILES_TO_RUN} @@ -116,26 +112,18 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then # Step 6: Copy generated files to S3, tag with commit ID 7z a worker_${WORKER_ID}.7z docs - awsv2 s3 cp worker_${WORKER_ID}.7z s3://${BUCKET_NAME}/${COMMIT_ID}/worker_${WORKER_ID}.7z --acl public-read -elif [[ "${JOB_BASE_NAME}" == *manager ]]; then + awsv2 s3 cp worker_${WORKER_ID}.7z s3://${BUCKET_NAME}/${COMMIT_ID}/worker_${WORKER_ID}.7z +elif [[ "${JOB_TYPE}" == "manager" ]]; then # Step 1: Generate no-plot HTML pages for all tutorials make html-noplot cp -r _build/html docs # Step 2: Wait for all workers to finish - set +e - for ((worker_id=0;worker_id List[str]: sources = [x.relative_to(REPO_BASE_DIR) for x in REPO_BASE_DIR.glob("*_source/**/*.py") if 'data' not in x.parts] - return [str(x) for x in sources] + return sorted([str(x) for x in sources]) def read_metadata() -> Dict[str, Any]: @@ -87,8 +87,8 @@ def parse_args() -> Any: from argparse import ArgumentParser parser = ArgumentParser("Select files to run") parser.add_argument("--dry-run", action="store_true") - parser.add_argument("--num-shards", type=int, default=int(os.environ.get("NUM_WORKERS", 20))) - parser.add_argument("--shard-num", type=int, default=int(os.environ.get("WORKER_ID", 0))) + parser.add_argument("--num-shards", type=int, default=int(os.environ.get("NUM_WORKERS", "20"))) + parser.add_argument("--shard-num", type=int, default=int(os.environ.get("WORKER_ID", "1"))) return parser.parse_args() @@ -96,7 +96,7 @@ def main() -> None: args = parse_args() all_files = get_all_files() - files_to_run = calculate_shards(all_files, num_shards=args.num_shards)[args.shard_num] + files_to_run = 
calculate_shards(all_files, num_shards=args.num_shards)[args.shard_num - 1] if not args.dry_run: remove_other_files(all_files, compute_files_to_keep(files_to_run)) stripped_file_names = [Path(x).stem for x in files_to_run] From 121f71a0bd928fb018c1e409efe31f88478f1818 Mon Sep 17 00:00:00 2001 From: clee2000 <44682903+clee2000@users.noreply.github.com> Date: Mon, 5 Jun 2023 14:59:50 -0700 Subject: [PATCH 36/67] Upload docs preview (#2426) Add step to upload docs preview in manager preview at: https://docs-preview.pytorch.org/pytorch/tutorials/2426/index.html --- .github/workflows/build-tutorials.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/build-tutorials.yml b/.github/workflows/build-tutorials.yml index 222de26b9bb..c242a1897c6 100644 --- a/.github/workflows/build-tutorials.yml +++ b/.github/workflows/build-tutorials.yml @@ -177,6 +177,16 @@ jobs: docker exec -t "${container_name}" sh -c ".jenkins/build.sh" + - name: Upload docs preview + uses: seemethere/upload-artifact-s3@v5 + if: ${{ github.event_name == 'pull_request' }} + with: + retention-days: 14 + s3-bucket: doc-previews + if-no-files-found: error + path: docs + s3-prefix: pytorch/tutorials/${{ github.event.pull_request.number }} + - name: Teardown Linux uses: pytorch/test-infra/.github/actions/teardown-linux@main if: always() From 1068abed74b30919179c40e88fa81514b8a3f5e0 Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Mon, 5 Jun 2023 17:13:41 -0700 Subject: [PATCH 37/67] [BE] Delete `from __future__ import` (#2429) As Python-2.7 is long past EOL, and all tutorials are living in the future :) --- advanced_source/cpp_frontend.rst | 3 --- advanced_source/neural_style_tutorial.py | 2 -- beginner_source/chatbot_tutorial.py | 5 ----- beginner_source/data_loading_tutorial.py | 1 - beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py | 5 ----- beginner_source/fgsm_tutorial.py | 1 - beginner_source/transfer_learning_tutorial.py | 2 -- intermediate_source/char_rnn_classification_tutorial.py | 1 - intermediate_source/char_rnn_generation_tutorial.py | 1 - intermediate_source/dynamic_quantization_bert_tutorial.rst | 2 -- intermediate_source/seq2seq_translation_tutorial.py | 1 - intermediate_source/spatial_transformer_tutorial.py | 1 - prototype_source/graph_mode_dynamic_bert_tutorial.rst | 2 -- prototype_source/numeric_suite_tutorial.py | 1 - 14 files changed, 28 deletions(-) diff --git a/advanced_source/cpp_frontend.rst b/advanced_source/cpp_frontend.rst index 11033951ece..901658183c7 100644 --- a/advanced_source/cpp_frontend.rst +++ b/advanced_source/cpp_frontend.rst @@ -1216,9 +1216,6 @@ tensors and display them with matplotlib: .. 
code-block:: python - from __future__ import print_function - from __future__ import unicode_literals - import argparse import matplotlib.pyplot as plt diff --git a/advanced_source/neural_style_tutorial.py b/advanced_source/neural_style_tutorial.py index 54085fb1e98..4c42c228448 100644 --- a/advanced_source/neural_style_tutorial.py +++ b/advanced_source/neural_style_tutorial.py @@ -47,8 +47,6 @@ # - ``torchvision.models`` (train or load pretrained models) # - ``copy`` (to deep copy the models; system package) -from __future__ import print_function - import torch import torch.nn as nn import torch.nn.functional as F diff --git a/beginner_source/chatbot_tutorial.py b/beginner_source/chatbot_tutorial.py index 02185a6ba3e..44310cc3620 100644 --- a/beginner_source/chatbot_tutorial.py +++ b/beginner_source/chatbot_tutorial.py @@ -92,11 +92,6 @@ # After that, let’s import some necessities. # -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - import torch from torch.jit import script, trace import torch.nn as nn diff --git a/beginner_source/data_loading_tutorial.py b/beginner_source/data_loading_tutorial.py index d5326f6e9a6..7ec18236b33 100644 --- a/beginner_source/data_loading_tutorial.py +++ b/beginner_source/data_loading_tutorial.py @@ -18,7 +18,6 @@ """ -from __future__ import print_function, division import os import torch import pandas as pd diff --git a/beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py b/beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py index 5e985b58598..508fa5a057a 100644 --- a/beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py +++ b/beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py @@ -101,11 +101,6 @@ # maximum length output that the model is capable of producing. # -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - import torch import torch.nn as nn import torch.nn.functional as F diff --git a/beginner_source/fgsm_tutorial.py b/beginner_source/fgsm_tutorial.py index fa23680496c..e200f09a712 100644 --- a/beginner_source/fgsm_tutorial.py +++ b/beginner_source/fgsm_tutorial.py @@ -90,7 +90,6 @@ # into the implementation. # -from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F diff --git a/beginner_source/transfer_learning_tutorial.py b/beginner_source/transfer_learning_tutorial.py index b09efc11749..f08312522c8 100644 --- a/beginner_source/transfer_learning_tutorial.py +++ b/beginner_source/transfer_learning_tutorial.py @@ -33,8 +33,6 @@ # License: BSD # Author: Sasank Chilamkurthy -from __future__ import print_function, division - import torch import torch.nn as nn import torch.optim as optim diff --git a/intermediate_source/char_rnn_classification_tutorial.py b/intermediate_source/char_rnn_classification_tutorial.py index 0c0aa3e988b..0957b109b3a 100644 --- a/intermediate_source/char_rnn_classification_tutorial.py +++ b/intermediate_source/char_rnn_classification_tutorial.py @@ -74,7 +74,6 @@ ``{language: [names ...]}``. The generic variables "category" and "line" (for language and name in our case) are used for later extensibility. 
""" -from __future__ import unicode_literals, print_function, division from io import open import glob import os diff --git a/intermediate_source/char_rnn_generation_tutorial.py b/intermediate_source/char_rnn_generation_tutorial.py index d0c1c553865..5e0f6308c01 100644 --- a/intermediate_source/char_rnn_generation_tutorial.py +++ b/intermediate_source/char_rnn_generation_tutorial.py @@ -75,7 +75,6 @@ and end up with a dictionary ``{language: [names ...]}``. """ -from __future__ import unicode_literals, print_function, division from io import open import glob import os diff --git a/intermediate_source/dynamic_quantization_bert_tutorial.rst b/intermediate_source/dynamic_quantization_bert_tutorial.rst index 39cff5a22c5..6ece2a9d405 100644 --- a/intermediate_source/dynamic_quantization_bert_tutorial.rst +++ b/intermediate_source/dynamic_quantization_bert_tutorial.rst @@ -92,8 +92,6 @@ In this step we import the necessary Python modules for the tutorial. .. code:: python - from __future__ import absolute_import, division, print_function - import logging import numpy as np import os diff --git a/intermediate_source/seq2seq_translation_tutorial.py b/intermediate_source/seq2seq_translation_tutorial.py index 7953854e60a..776197fbbd1 100644 --- a/intermediate_source/seq2seq_translation_tutorial.py +++ b/intermediate_source/seq2seq_translation_tutorial.py @@ -78,7 +78,6 @@ **Requirements** """ -from __future__ import unicode_literals, print_function, division from io import open import unicodedata import string diff --git a/intermediate_source/spatial_transformer_tutorial.py b/intermediate_source/spatial_transformer_tutorial.py index b566e7e4e0b..49b6b0f0a2b 100644 --- a/intermediate_source/spatial_transformer_tutorial.py +++ b/intermediate_source/spatial_transformer_tutorial.py @@ -27,7 +27,6 @@ # License: BSD # Author: Ghassen Hamrouni -from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F diff --git a/prototype_source/graph_mode_dynamic_bert_tutorial.rst b/prototype_source/graph_mode_dynamic_bert_tutorial.rst index 2a296ccfa6b..b7757d8a1de 100644 --- a/prototype_source/graph_mode_dynamic_bert_tutorial.rst +++ b/prototype_source/graph_mode_dynamic_bert_tutorial.rst @@ -40,8 +40,6 @@ Once all the necesessary packages are downloaded and installed we setup the code .. 
code:: python - from __future__ import absolute_import, division, print_function - import logging import numpy as np import os diff --git a/prototype_source/numeric_suite_tutorial.py b/prototype_source/numeric_suite_tutorial.py index 35052f4b2f4..ee486d43c3b 100644 --- a/prototype_source/numeric_suite_tutorial.py +++ b/prototype_source/numeric_suite_tutorial.py @@ -24,7 +24,6 @@ ############################################################################## -from __future__ import print_function, division, absolute_import import numpy as np import torch import torch.nn as nn From d541f746d1d60b787f389e915e96391a6c26d4f2 Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Tue, 6 Jun 2023 13:20:42 +0000 Subject: [PATCH 38/67] [BE] Cleanup + set random seed - Remove unnecessary brackets - Use f-strings - Set random seed for reproducibility --- beginner_source/fgsm_tutorial.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/beginner_source/fgsm_tutorial.py b/beginner_source/fgsm_tutorial.py index e200f09a712..6071cb2fb35 100644 --- a/beginner_source/fgsm_tutorial.py +++ b/beginner_source/fgsm_tutorial.py @@ -98,13 +98,6 @@ import numpy as np import matplotlib.pyplot as plt -# NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets -# see, https://github.com/pytorch/vision/issues/3497 for more information -from six.moves import urllib -opener = urllib.request.build_opener() -opener.addheaders = [('User-agent', 'Mozilla/5.0')] -urllib.request.install_opener(opener) - ###################################################################### # Implementation @@ -140,6 +133,8 @@ epsilons = [0, .05, .1, .15, .2, .25, .3] pretrained_model = "data/lenet_mnist_model.pth" use_cuda=True +# Set random seed for reproducibility +torch.manual_seed(42) ###################################################################### @@ -178,18 +173,18 @@ def forward(self, x): test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([ transforms.ToTensor(), - ])), + ])), batch_size=1, shuffle=True) # Define what device we are using print("CUDA Available: ",torch.cuda.is_available()) -device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu") +device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu") # Initialize the network model = Net().to(device) # Load the pretrained model -model.load_state_dict(torch.load(pretrained_model, map_location='cpu')) +model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu')) # Set the model in evaluation mode. 
In this case this is for the Dropout layers model.eval() @@ -289,7 +284,7 @@ def test( model, device, test_loader, epsilon ): if final_pred.item() == target.item(): correct += 1 # Special case for saving 0 epsilon examples - if (epsilon == 0) and (len(adv_examples) < 5): + if epsilon == 0 and len(adv_examples) < 5: adv_ex = perturbed_data.squeeze().detach().cpu().numpy() adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) ) else: @@ -300,7 +295,7 @@ def test( model, device, test_loader, epsilon ): # Calculate final accuracy for this epsilon final_acc = correct/float(len(test_loader)) - print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc)) + print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {len(test_loader)} = {final_acc}") # Return the accuracy and an adversarial example return final_acc, adv_examples @@ -386,9 +381,9 @@ def test( model, device, test_loader, epsilon ): plt.xticks([], []) plt.yticks([], []) if j == 0: - plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14) + plt.ylabel(f"Eps: {epsilons[i]}", fontsize=14) orig,adv,ex = examples[i][j] - plt.title("{} -> {}".format(orig, adv)) + plt.title(f"{orig} -> {adv}") plt.imshow(ex, cmap="gray") plt.tight_layout() plt.show() From 1d90341f2704f2adaa8039d0b23a49bc54c256fd Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Tue, 6 Jun 2023 12:31:29 -0700 Subject: [PATCH 39/67] [BE] Simplify `ids_tensor` (#2431) Remove `global_rng` and use `torch.randint` to feel the tensor of shape `shape` with values in range `[0, vocab_size)` Co-authored-by: Svetlana Karslioglu --- .../dynamic_quantization_bert_tutorial.rst | 17 ++--------------- .../graph_mode_dynamic_bert_tutorial.rst | 17 ++--------------- 2 files changed, 4 insertions(+), 30 deletions(-) diff --git a/intermediate_source/dynamic_quantization_bert_tutorial.rst b/intermediate_source/dynamic_quantization_bert_tutorial.rst index 6ece2a9d405..dd76d08956f 100644 --- a/intermediate_source/dynamic_quantization_bert_tutorial.rst +++ b/intermediate_source/dynamic_quantization_bert_tutorial.rst @@ -253,8 +253,6 @@ model before and after the dynamic quantization. torch.manual_seed(seed) set_seed(42) - # Initialize a global random number generator - global_rng = random.Random() 2.2 Load the fine-tuned BERT model @@ -526,20 +524,9 @@ We can serialize and save the quantized model for the future use using .. 
code:: python - def ids_tensor(shape, vocab_size, rng=None, name=None): + def ids_tensor(shape, vocab_size): # Creates a random int32 tensor of the shape within the vocab size - if rng is None: - rng = global_rng - - total_dims = 1 - for dim in shape: - total_dims *= dim - - values = [] - for _ in range(total_dims): - values.append(rng.randint(0, vocab_size - 1)) - - return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous() + return torch.randint(0, vocab_size, shape=shape, dtype=torch.int, device='cpu') input_ids = ids_tensor([8, 128], 2) token_type_ids = ids_tensor([8, 128], 2) diff --git a/prototype_source/graph_mode_dynamic_bert_tutorial.rst b/prototype_source/graph_mode_dynamic_bert_tutorial.rst index b7757d8a1de..5d76ddef79a 100644 --- a/prototype_source/graph_mode_dynamic_bert_tutorial.rst +++ b/prototype_source/graph_mode_dynamic_bert_tutorial.rst @@ -60,22 +60,9 @@ Once all the necesessary packages are downloaded and installed we setup the code from torch.quantization import per_channel_dynamic_qconfig from torch.quantization import quantize_dynamic_jit - global_rng = random.Random() - - def ids_tensor(shape, vocab_size, rng=None, name=None): + def ids_tensor(shape, vocab_size): # Creates a random int32 tensor of the shape within the vocab size - if rng is None: - rng = global_rng - - total_dims = 1 - for dim in shape: - total_dims *= dim - - values = [] - for _ in range(total_dims): - values.append(rng.randint(0, vocab_size - 1)) - - return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous() + return torch.randint(0, vocab_size, shape=shape, dtype=torch.int, device='cpu') # Setup logging logger = logging.getLogger(__name__) From 2284ab2381c623291ccb4a476e3753aac5671fd9 Mon Sep 17 00:00:00 2001 From: Laith Hasanian Date: Tue, 6 Jun 2023 13:12:38 -0700 Subject: [PATCH 40/67] Update torch_compile_tutorial.py to use unused parameter (#2436) I noticed when reading through these docs that the two examples did not use the parameter 'y'. I assume it was meant to be used so I updated the code in the examples. Another possibility is that we don't need param 'y' and only need 'x'. 
Let me know if that is the case and I will fix this :) --- intermediate_source/torch_compile_tutorial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/intermediate_source/torch_compile_tutorial.py b/intermediate_source/torch_compile_tutorial.py index d4b8e54b9ed..b8a37cc0a7e 100644 --- a/intermediate_source/torch_compile_tutorial.py +++ b/intermediate_source/torch_compile_tutorial.py @@ -69,7 +69,7 @@ def foo(x, y): a = torch.sin(x) - b = torch.cos(x) + b = torch.cos(y) return a + b opt_foo1 = torch.compile(foo) print(opt_foo1(torch.randn(10, 10), torch.randn(10, 10))) @@ -80,7 +80,7 @@ def foo(x, y): @torch.compile def opt_foo2(x, y): a = torch.sin(x) - b = torch.cos(x) + b = torch.cos(y) return a + b print(opt_foo2(torch.randn(10, 10), torch.randn(10, 10))) From 6e0fd0a8d239018e3010466614cb993d13acb32e Mon Sep 17 00:00:00 2001 From: Pratik Hublikar <57823560+neuralninja27@users.noreply.github.com> Date: Wed, 7 Jun 2023 02:24:58 +0530 Subject: [PATCH 41/67] Update mario_rl_tutorial.py (#2381) * Update mario_rl_tutorial.py Fixes #1620 --------- Co-authored-by: Vincent Moens Co-authored-by: Svetlana Karslioglu --- intermediate_source/mario_rl_tutorial.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/intermediate_source/mario_rl_tutorial.py b/intermediate_source/mario_rl_tutorial.py index 8d02f3daf34..eb46feb2ad0 100755 --- a/intermediate_source/mario_rl_tutorial.py +++ b/intermediate_source/mario_rl_tutorial.py @@ -53,6 +53,8 @@ # Super Mario environment for OpenAI Gym import gym_super_mario_bros +from tensordict import TensorDict +from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage ###################################################################### # RL Definitions @@ -348,7 +350,7 @@ def act(self, state): class Mario(Mario): # subclassing for continuity def __init__(self, state_dim, action_dim, save_dir): super().__init__(state_dim, action_dim, save_dir) - self.memory = deque(maxlen=100000) + self.memory = TensorDictReplayBuffer(storage=LazyMemmapStorage(100000)) self.batch_size = 32 def cache(self, state, next_state, action, reward, done): @@ -373,14 +375,15 @@ def first_if_tuple(x): reward = torch.tensor([reward], device=self.device) done = torch.tensor([done], device=self.device) - self.memory.append((state, next_state, action, reward, done,)) + # self.memory.append((state, next_state, action, reward, done,)) + self.memory.add(TensorDict({"state": state, "next_state": next_state, "action": action, "reward": reward, "done": done}, batch_size=[])) def recall(self): """ Retrieve a batch of experiences from memory """ - batch = random.sample(self.memory, self.batch_size) - state, next_state, action, reward, done = map(torch.stack, zip(*batch)) + batch = self.memory.sample(self.batch_size) + state, next_state, action, reward, done = (batch.get(key) for key in ("state", "next_state", "action", "reward", "done")) return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze() From 730029b88c4bbac8febd9618bbcf5af60945c89c Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 6 Jun 2023 16:05:39 -0700 Subject: [PATCH 42/67] Update rpc_ddp_tutorial.rst (#2437) Update the github username of an author --- advanced_source/rpc_ddp_tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/advanced_source/rpc_ddp_tutorial.rst b/advanced_source/rpc_ddp_tutorial.rst index 747c12f6d4f..5c7aeffb2f9 100644 --- a/advanced_source/rpc_ddp_tutorial.rst +++ b/advanced_source/rpc_ddp_tutorial.rst @@ -1,6 +1,6 
@@ Combining Distributed DataParallel with Distributed RPC Framework ================================================================= -**Authors**: `Pritam Damania `_ and `Yi Wang `_ +**Authors**: `Pritam Damania `_ and `Yi Wang `_ .. note:: |edit| View and edit this tutorial in `github `__. From eaa2e901bf28add46763429f34055d901057a905 Mon Sep 17 00:00:00 2001 From: clee2000 <44682903+clee2000@users.noreply.github.com> Date: Wed, 7 Jun 2023 10:27:29 -0700 Subject: [PATCH 43/67] Set random seed (#2438) To make tutorial builds predictable, but still keep randomness when one rans it on Collab. Also, reset default_device after every tutorial runCo-authored-by: Nikita Shulga Co-authored-by: Nikita Shulga --- conf.py | 9 ++++++++- recipes_source/recipes/changing_default_device.py | 3 --- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/conf.py b/conf.py index eaa25a956c6..5f88045adb3 100644 --- a/conf.py +++ b/conf.py @@ -34,6 +34,7 @@ import pytorch_sphinx_theme import torch import glob +import random import shutil from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective import distutils.file_util @@ -85,6 +86,11 @@ # -- Sphinx-gallery configuration -------------------------------------------- +def reset_seeds(gallery_conf, fname): + torch.manual_seed(42) + torch.set_default_device(None) + random.seed(10) + sphinx_gallery_conf = { 'examples_dirs': ['beginner_source', 'intermediate_source', 'advanced_source', 'recipes_source', 'prototype_source'], @@ -94,7 +100,8 @@ 'backreferences_dir': None, 'first_notebook_cell': ("# For tips on running notebooks in Google Colab, see\n" "# https://pytorch.org/tutorials/beginner/colab\n" - "%matplotlib inline") + "%matplotlib inline"), + 'reset_modules': (reset_seeds) } if os.getenv('GALLERY_PATTERN'): diff --git a/recipes_source/recipes/changing_default_device.py b/recipes_source/recipes/changing_default_device.py index f5e50b3f0be..103560fd743 100644 --- a/recipes_source/recipes/changing_default_device.py +++ b/recipes_source/recipes/changing_default_device.py @@ -43,9 +43,6 @@ print(mod.weight.device) print(mod(torch.randn(128, 20)).device) -# And then globally return it back to CPU -torch.set_default_device('cpu') - ################################################################ # This function imposes a slight performance cost on every Python # call to the torch API (not just factory functions). 
If this From d9938ee19e585d81e4d0f1b30596f621a5113b91 Mon Sep 17 00:00:00 2001 From: Boadi Samson Date: Thu, 8 Jun 2023 00:20:16 +0200 Subject: [PATCH 44/67] created original copy of the model by loading from disk (#2406) * created original copy of the model by loading from disk * Update fx_graph_mode_ptq_dynamic.py --------- Co-authored-by: Svetlana Karslioglu --- prototype_source/fx_graph_mode_ptq_dynamic.py | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/prototype_source/fx_graph_mode_ptq_dynamic.py b/prototype_source/fx_graph_mode_ptq_dynamic.py index eda88ff5c01..98ece5f3d31 100644 --- a/prototype_source/fx_graph_mode_ptq_dynamic.py +++ b/prototype_source/fx_graph_mode_ptq_dynamic.py @@ -239,9 +239,27 @@ def evaluate(model_, data_source): .set_object_type(nn.LSTM, default_dynamic_qconfig) .set_object_type(nn.Linear, default_dynamic_qconfig) ) -# Deepcopying the original model because quantization api changes the model inplace and we want +# Load model to create the original model because quantization api changes the model inplace and we want # to keep the original model for future comparison -model_to_quantize = copy.deepcopy(model) + + +model_to_quantize = LSTMModel( + ntoken = ntokens, + ninp = 512, + nhid = 256, + nlayers = 5, +) + +model_to_quantize.load_state_dict( + torch.load( + model_data_filepath + 'word_language_model_quantize.pth', + map_location=torch.device('cpu') + ) + ) + +model_to_quantize.eval() + + prepared_model = prepare_fx(model_to_quantize, qconfig_mapping, example_inputs) print("prepared model:", prepared_model) quantized_model = convert_fx(prepared_model) @@ -289,4 +307,4 @@ def time_model_evaluation(model, test_data): # 3. Conclusion # ------------- # This tutorial introduces the api for post training dynamic quantization in FX Graph Mode, -# which dynamically quantizes the same modules as Eager Mode Quantization. \ No newline at end of file +# which dynamically quantizes the same modules as Eager Mode Quantization. From fc7494da87419c78c57aa0f9e778a581872e3387 Mon Sep 17 00:00:00 2001 From: William Wen Date: Thu, 8 Jun 2023 07:32:50 -0700 Subject: [PATCH 45/67] Typo fix to torch_compile_tutorial.py (#2446) "evaluating and training ResNet-18 on random data" --> "evaluating and training a ``torchvision`` model on random data", since speedups are no longer demonstrated on resnet18. --- intermediate_source/torch_compile_tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intermediate_source/torch_compile_tutorial.py b/intermediate_source/torch_compile_tutorial.py index b8a37cc0a7e..40a53c263ed 100644 --- a/intermediate_source/torch_compile_tutorial.py +++ b/intermediate_source/torch_compile_tutorial.py @@ -105,7 +105,7 @@ def forward(self, x): # # Let's now demonstrate that using ``torch.compile`` can speed # up real models. We will compare standard eager mode and -# ``torch.compile`` by evaluating and training ResNet-18 on random data. +# ``torch.compile`` by evaluating and training a ``torchvision`` model on random data. # # Before we start, we need to define some utility functions. 
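For readers skimming the ``torch.compile`` change above, a minimal timing sketch along the following lines shows what the eager-versus-compiled comparison boils down to. It is an illustrative assumption, not the tutorial's actual utility functions: the ``timed`` helper, the choice of ``resnet50``, and the batch size are all made up for this sketch.

import time
import torch
import torchvision.models as models

def timed(fn):
    # Wall-clock timing of a single call; synchronize so queued CUDA work
    # is finished before the end timestamp is taken.
    start = time.perf_counter()
    result = fn()
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return result, time.perf_counter() - start

device = "cuda" if torch.cuda.is_available() else "cpu"
model = models.resnet50().to(device)   # any torchvision model works for the comparison
opt_model = torch.compile(model)       # compiled copy; the eager `model` is left untouched
x = torch.randn(16, 3, 224, 224, device=device)

with torch.no_grad():
    _, eager_time = timed(lambda: model(x))
    _, warmup_time = timed(lambda: opt_model(x))    # first call pays the compilation cost
    _, compiled_time = timed(lambda: opt_model(x))  # later calls reflect the actual speedup

print(f"eager {eager_time:.4f}s, warmup {warmup_time:.4f}s, compiled {compiled_time:.4f}s")
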
From 3b6d83b2130903be87693a4813f0be70990710a0 Mon Sep 17 00:00:00 2001 From: BJ Hargrave Date: Thu, 8 Jun 2023 11:55:04 -0400 Subject: [PATCH 46/67] Change paper reference to a paper matching the model used (#2424) Fixes https://github.com/pytorch/tutorials/issues/1642 Signed-off-by: BJ Hargrave Co-authored-by: sekyondaMeta <127536312+sekyondaMeta@users.noreply.github.com> --- intermediate_source/seq2seq_translation_tutorial.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/intermediate_source/seq2seq_translation_tutorial.py b/intermediate_source/seq2seq_translation_tutorial.py index 776197fbbd1..ea583821f85 100644 --- a/intermediate_source/seq2seq_translation_tutorial.py +++ b/intermediate_source/seq2seq_translation_tutorial.py @@ -45,7 +45,7 @@ :alt: To improve upon this model we'll use an `attention -mechanism `__, which lets the decoder +mechanism `__, which lets the decoder learn to focus over a specific range of the input sequence. **Recommended Reading:** @@ -66,8 +66,8 @@ Statistical Machine Translation `__ - `Sequence to Sequence Learning with Neural Networks `__ -- `Neural Machine Translation by Jointly Learning to Align and - Translate `__ +- `Effective Approaches to Attention-based Neural Machine + Translation `__ - `A Neural Conversational Model `__ You will also find the previous tutorials on From 1fe4025f750d8042e5245fe9cbc50627ca2d6abf Mon Sep 17 00:00:00 2001 From: BJ Hargrave Date: Thu, 8 Jun 2023 12:29:49 -0400 Subject: [PATCH 47/67] Set the random seed for reproducibility of the output (#2428) We also fix the code to use the scripted_cell just created. Fixes https://github.com/pytorch/tutorials/issues/1449 Signed-off-by: BJ Hargrave --- beginner_source/Intro_to_TorchScript_tutorial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beginner_source/Intro_to_TorchScript_tutorial.py b/beginner_source/Intro_to_TorchScript_tutorial.py index d369c4fbf80..21ee32ff384 100644 --- a/beginner_source/Intro_to_TorchScript_tutorial.py +++ b/beginner_source/Intro_to_TorchScript_tutorial.py @@ -33,6 +33,7 @@ import torch # This is all you need to use both PyTorch and TorchScript! print(torch.__version__) +torch.manual_seed(191009) # set the seed for reproducibility ###################################################################### @@ -308,7 +309,7 @@ def forward(self, x, h): # New inputs x, h = torch.rand(3, 4), torch.rand(3, 4) -traced_cell(x, h) +print(scripted_cell(x, h)) ###################################################################### From 2bdd8460fd912024cdecb9ddd2a2b4b4c1473e50 Mon Sep 17 00:00:00 2001 From: Degao Chu Date: Fri, 9 Jun 2023 23:55:15 +0800 Subject: [PATCH 48/67] Update example link in FSDP_adavnced_tutorial.rst (#2448) Co-authored-by: Svetlana Karslioglu --- intermediate_source/FSDP_adavnced_tutorial.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/intermediate_source/FSDP_adavnced_tutorial.rst b/intermediate_source/FSDP_adavnced_tutorial.rst index cce90e8787e..748c8593306 100644 --- a/intermediate_source/FSDP_adavnced_tutorial.rst +++ b/intermediate_source/FSDP_adavnced_tutorial.rst @@ -75,7 +75,7 @@ highlight different available features in FSDP that are helpful for training large scale model above 3B parameters. Also, we cover specific features for Transformer based models. The code for this tutorial is available in `Pytorch Examples -`__. +`__. 
*Setup* @@ -97,13 +97,13 @@ Please create a `data` folder, download the WikiHow dataset from `wikihowAll.csv `wikihowSep.cs `__, and place them in the `data` folder. We will use the wikihow dataset from `summarization_dataset -`__. +`__. Next, we add the following code snippets to a Python script “T5_training.py”. .. note:: The full source code for this tutorial is available in `PyTorch examples - `__. + `__. 1.3 Import necessary packages: From 203f567555b1c4cc86d03fd36f7b7ae4c3405262 Mon Sep 17 00:00:00 2001 From: NM512 <70328564+NM512@users.noreply.github.com> Date: Sat, 10 Jun 2023 01:32:35 +0900 Subject: [PATCH 49/67] Update transformer_tutorial.py (#2451) Co-authored-by: NM512 Co-authored-by: Svetlana Karslioglu --- beginner_source/transformer_tutorial.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py index a3fc3ab16eb..2f87117752f 100644 --- a/beginner_source/transformer_tutorial.py +++ b/beginner_source/transformer_tutorial.py @@ -2,7 +2,7 @@ Language Modeling with ``nn.Transformer`` and torchtext =============================================================== -This is a tutorial on training a sequence-to-sequence model that uses the +This is a tutorial on training a model to predict the next word in a sequence using the `nn.Transformer `__ module. The PyTorch 1.2 release includes a standard transformer module based on the @@ -29,7 +29,9 @@ ###################################################################### # In this tutorial, we train a ``nn.TransformerEncoder`` model on a -# language modeling task. The language modeling task is to assign a +# language modeling task. Please note that this tutorial does not cover +# the training of `nn.TransformerDecoder `__, as depicted in +# the right half of the diagram above. The language modeling task is to assign a # probability for the likelihood of a given word (or a sequence of words) # to follow a sequence of words. A sequence of tokens are passed to the embedding # layer first, followed by a positional encoding layer to account for the order @@ -130,6 +132,7 @@ def forward(self, x: Tensor) -> Tensor: # .. 
code-block:: bash # # %%bash +# pip install portalocker # pip install torchdata # # The vocab object is built based on the train dataset and is used to numericalize From a5376f73f619b7ab3d1a29a7b39bcffa3b91fd0f Mon Sep 17 00:00:00 2001 From: Qasim Khan Date: Fri, 9 Jun 2023 22:45:43 +0500 Subject: [PATCH 50/67] Fix Attention in seq2seq_translation_tutorial AttnDecoderRNN (#2452) * replace old decoder diagram with new one * remove 1 from encoder1 and decoder1 * fix attention in AttnDecoderRNN * Fix formatting going over max character count --------- Co-authored-by: Svetlana Karslioglu --- .../attention-decoder-network.png | Bin 36999 -> 44701 bytes .../seq2seq_translation_tutorial.py | 38 +++++++++--------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/_static/img/seq-seq-images/attention-decoder-network.png b/_static/img/seq-seq-images/attention-decoder-network.png index 243f87c6e972bcb712cfc1d3fa250bc725f8de31..d31d42a5af19b96af1163fbdc6fcd0e5d4a9d79f 100755 GIT binary patch literal 44701 [base85-encoded binary PNG data for attention-decoder-network.png omitted]