diff --git a/docs/1.7.0/torchvision/.nojekyll b/docs/1.7.0/torchvision/.nojekyll
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/1.7.0/torchvision/_modules/index.html b/docs/1.7.0/torchvision/_modules/index.html
new file mode 100644
index 000000000000..ded9c6f7b1fc
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/index.html
@@ -0,0 +1,599 @@
+Overview: module code — Torchvision 0.8.0.dev20201012 documentation

All modules for which code is available

\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision.html b/docs/1.7.0/torchvision/_modules/torchvision.html
new file mode 100644
index 000000000000..da5aa22fbab5
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision.html
@@ -0,0 +1,629 @@
+torchvision — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision

+import warnings
+
+from .extension import _HAS_OPS
+
+from torchvision import models
+from torchvision import datasets
+from torchvision import ops
+from torchvision import transforms
+from torchvision import utils
+from torchvision import io
+
+import torch
+
+try:
+    from .version import __version__  # noqa: F401
+except ImportError:
+    pass
+
+_image_backend = 'PIL'
+
+_video_backend = "pyav"
+
+
+
+def set_image_backend(backend):
+    """
+    Specifies the package used to load images.
+
+    Args:
+        backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
+            The :mod:`accimage` package uses the Intel IPP library. It is
+            generally faster than PIL, but does not support as many operations.
+    """
+    global _image_backend
+    if backend not in ['PIL', 'accimage']:
+        raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'"
+                         .format(backend))
+    _image_backend = backend
+
+
+def get_image_backend():
+    """
+    Gets the name of the package used to load images
+    """
+    return _image_backend
+
+
+def set_video_backend(backend):
+    """
+    Specifies the package used to decode videos.
+
+    Args:
+        backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
+            The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
+            binding for the FFmpeg libraries.
+            The :mod:`video_reader` package includes a native C++ implementation on
+            top of FFMPEG libraries, and a python API of TorchScript custom operator.
+            It is generally decoding faster than :mod:`pyav`, but perhaps is less robust.
+    """
+    global _video_backend
+    if backend not in ["pyav", "video_reader"]:
+        raise ValueError(
+            "Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend
+        )
+    if backend == "video_reader" and not io._HAS_VIDEO_OPT:
+        message = (
+            "video_reader video backend is not available."
+            " Please compile torchvision from source and try again"
+        )
+        warnings.warn(message)
+    else:
+        _video_backend = backend
+
+
+def get_video_backend():
+    return _video_backend
+
+
+def _is_tracing():
+    return torch._C._get_tracing_state()
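For quick reference, a minimal sketch of how the backend switches above are typically used; installing :mod:`accimage` or building the `video_reader` backend is an assumption, not required for the defaults:

.. code-block:: python

    import torchvision

    # Defaults set in this module: 'PIL' for images, 'pyav' for video.
    print(torchvision.get_image_backend())   # 'PIL'
    print(torchvision.get_video_backend())   # 'pyav'

    # Switch backends; 'accimage' must be installed separately, and
    # 'video_reader' requires torchvision built from source (else it only warns).
    torchvision.set_image_backend('accimage')
    torchvision.set_video_backend('pyav')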
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/celeba.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/celeba.html
new file mode 100644
index 000000000000..eba3806b74ed
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/celeba.html
@@ -0,0 +1,718 @@
+torchvision.datasets.celeba — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.celeba

+from functools import partial
+import torch
+import os
+import PIL
+from typing import Any, Callable, List, Optional, Union, Tuple
+from .vision import VisionDataset
+from .utils import download_file_from_google_drive, check_integrity, verify_str_arg
+
+
+
[docs]class CelebA(VisionDataset): + """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + split (string): One of {'train', 'valid', 'test', 'all'}. + Accordingly dataset is selected. + target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``, + or ``landmarks``. Can also be a list to output a tuple with all specified target types. + The targets represent: + ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes + ``identity`` (int): label for each person (data points with the same identity are the same person) + ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height) + ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x, + righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y) + Defaults to ``attr``. If empty, ``None`` will be returned as target. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + base_folder = "celeba" + # There currently does not appear to be a easy way to extract 7z in python (without introducing additional + # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available + # right now. + file_list = [ + # File ID MD5 Hash Filename + ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"), + # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"), + # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"), + ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"), + ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"), + ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"), + ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"), + # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"), + ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"), + ] + + def __init__( + self, + root: str, + split: str = "train", + target_type: Union[List[str], str] = "attr", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + import pandas + super(CelebA, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = split + if isinstance(target_type, list): + self.target_type = target_type + else: + self.target_type = [target_type] + + if not self.target_type and self.target_transform is not None: + raise RuntimeError('target_transform is specified but target_type is empty') + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' 
+ + ' You can use download=True to download it') + + split_map = { + "train": 0, + "valid": 1, + "test": 2, + "all": None, + } + split_ = split_map[verify_str_arg(split.lower(), "split", + ("train", "valid", "test", "all"))] + + fn = partial(os.path.join, self.root, self.base_folder) + splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0) + identity = pandas.read_csv(fn("identity_CelebA.txt"), delim_whitespace=True, header=None, index_col=0) + bbox = pandas.read_csv(fn("list_bbox_celeba.txt"), delim_whitespace=True, header=1, index_col=0) + landmarks_align = pandas.read_csv(fn("list_landmarks_align_celeba.txt"), delim_whitespace=True, header=1) + attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1) + + mask = slice(None) if split_ is None else (splits[1] == split_) + + self.filename = splits[mask].index.values + self.identity = torch.as_tensor(identity[mask].values) + self.bbox = torch.as_tensor(bbox[mask].values) + self.landmarks_align = torch.as_tensor(landmarks_align[mask].values) + self.attr = torch.as_tensor(attr[mask].values) + self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1} + self.attr_names = list(attr.columns) + + def _check_integrity(self) -> bool: + for (_, md5, filename) in self.file_list: + fpath = os.path.join(self.root, self.base_folder, filename) + _, ext = os.path.splitext(filename) + # Allow original archive to be deleted (zip and 7z) + # Only need the extracted images + if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5): + return False + + # Should check a hash of the images + return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba")) + + def download(self) -> None: + import zipfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + for (file_id, md5, filename) in self.file_list: + download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5) + + with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f: + f.extractall(os.path.join(self.root, self.base_folder)) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index])) + + target: Any = [] + for t in self.target_type: + if t == "attr": + target.append(self.attr[index, :]) + elif t == "identity": + target.append(self.identity[index, 0]) + elif t == "bbox": + target.append(self.bbox[index, :]) + elif t == "landmarks": + target.append(self.landmarks_align[index, :]) + else: + # TODO: refactor with utils.verify_str_arg + raise ValueError("Target type \"{}\" is not recognized.".format(t)) + + if self.transform is not None: + X = self.transform(X) + + if target: + target = tuple(target) if len(target) > 1 else target[0] + + if self.target_transform is not None: + target = self.target_transform(target) + else: + target = None + + return X, target + + def __len__(self) -> int: + return len(self.attr) + + def extra_repr(self) -> str: + lines = ["Target type: {target_type}", "Split: {split}"] + return '\n'.join(lines).format(**self.__dict__)
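A minimal usage sketch for the class above; the ``./data`` root is an assumption, and ``download=True`` fetches the aligned images listed in ``file_list``:

.. code-block:: python

    from torchvision import datasets, transforms

    celeba = datasets.CelebA(root="./data", split="train",
                             target_type=["attr", "identity"],
                             transform=transforms.ToTensor(),
                             download=True)
    img, (attr, identity) = celeba[0]   # attr: 40 binary labels, identity: person id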
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/cifar.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/cifar.html
new file mode 100644
index 000000000000..42d4905962b4
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/cifar.html
@@ -0,0 +1,721 @@
+torchvision.datasets.cifar — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.cifar

+from PIL import Image
+import os
+import os.path
+import numpy as np
+import pickle
+from typing import Any, Callable, Optional, Tuple
+
+from .vision import VisionDataset
+from .utils import check_integrity, download_and_extract_archive
+
+
+
[docs]class CIFAR10(VisionDataset): + """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``cifar-10-batches-py`` exists or will be saved to if download is set to True. + train (bool, optional): If True, creates dataset from training set, otherwise + creates from test set. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + base_folder = 'cifar-10-batches-py' + url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" + filename = "cifar-10-python.tar.gz" + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + + super(CIFAR10, self).__init__(root, transform=transform, + target_transform=target_transform) + + self.train = train # training set or test set + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + if self.train: + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + self.data: Any = [] + self.targets = [] + + # now load the picked numpy arrays + for file_name, checksum in downloaded_list: + file_path = os.path.join(self.root, self.base_folder, file_name) + with open(file_path, 'rb') as f: + entry = pickle.load(f, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.targets.extend(entry['labels']) + else: + self.targets.extend(entry['fine_labels']) + + self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + + self._load_meta() + + def _load_meta(self) -> None: + path = os.path.join(self.root, self.base_folder, self.meta['filename']) + if not check_integrity(path, self.meta['md5']): + raise RuntimeError('Dataset metadata file not found or corrupted.' + + ' You can use download=True to download it') + with open(path, 'rb') as infile: + data = pickle.load(infile, encoding='latin1') + self.classes = data[self.meta['key']] + self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], self.targets[index] + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test")
+ + +
[docs]class CIFAR100(CIFAR10): + """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + This is a subclass of the `CIFAR10` Dataset. + """ + base_folder = 'cifar-100-python' + url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" + filename = "cifar-100-python.tar.gz" + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + }
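A minimal sketch of loading the dataset above with a ``DataLoader``; the ``./data`` root is an assumption:

.. code-block:: python

    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    train_set = datasets.CIFAR10(root="./data", train=True, download=True,
                                 transform=transforms.ToTensor())
    loader = DataLoader(train_set, batch_size=64, shuffle=True)
    images, labels = next(iter(loader))   # images: [64, 3, 32, 32], labels: [64]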
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/cityscapes.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/cityscapes.html
new file mode 100644
index 000000000000..910fb878aea0
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/cityscapes.html
@@ -0,0 +1,768 @@
+torchvision.datasets.cityscapes — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.cityscapes

+import json
+import os
+from collections import namedtuple
+import zipfile
+from typing import Any, Callable, Dict, List, Optional, Union, Tuple
+
+from .utils import extract_archive, verify_str_arg, iterable_to_str
+from .vision import VisionDataset
+from PIL import Image
+
+
+
[docs]class Cityscapes(VisionDataset): + """`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory ``leftImg8bit`` + and ``gtFine`` or ``gtCoarse`` are located. + split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="fine" + otherwise ``train``, ``train_extra`` or ``val`` + mode (string, optional): The quality mode to use, ``fine`` or ``coarse`` + target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon`` + or ``color``. Can also be a list to output a tuple with all specified target types. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Examples: + + Get semantic segmentation target + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type='semantic') + + img, smnt = dataset[0] + + Get multiple targets + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type=['instance', 'color', 'polygon']) + + img, (inst, col, poly) = dataset[0] + + Validate on the "coarse" set + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse', + target_type='semantic') + + img, smnt = dataset[0] + """ + + # Based on https://github.com/mcordts/cityscapesScripts + CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id', + 'has_instances', 'ignore_in_eval', 'color']) + + classes = [ + CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)), + CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)), + CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)), + CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)), + CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)), + CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)), + CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)), + CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)), + CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)), + CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)), + CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)), + CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)), + CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)), + CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)), + CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)), + CityscapesClass('traffic sign', 20, 7, 'object', 
3, False, False, (220, 220, 0)), + CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)), + CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)), + CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)), + CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)), + CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)), + CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)), + CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)), + CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)), + CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)), + CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)), + CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)), + CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)), + CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)), + CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)), + ] + + def __init__( + self, + root: str, + split: str = "train", + mode: str = "fine", + target_type: Union[List[str], str] = "instance", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ) -> None: + super(Cityscapes, self).__init__(root, transforms, transform, target_transform) + self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse' + self.images_dir = os.path.join(self.root, 'leftImg8bit', split) + self.targets_dir = os.path.join(self.root, self.mode, split) + self.target_type = target_type + self.split = split + self.images = [] + self.targets = [] + + verify_str_arg(mode, "mode", ("fine", "coarse")) + if mode == "fine": + valid_modes = ("train", "test", "val") + else: + valid_modes = ("train", "train_extra", "val") + msg = ("Unknown value '{}' for argument split if mode is '{}'. " + "Valid values are {{{}}}.") + msg = msg.format(split, mode, iterable_to_str(valid_modes)) + verify_str_arg(split, "split", valid_modes, msg) + + if not isinstance(target_type, list): + self.target_type = [target_type] + [verify_str_arg(value, "target_type", + ("instance", "semantic", "polygon", "color")) + for value in self.target_type] + + if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): + + if split == 'train_extra': + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainextra.zip')) + else: + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainvaltest.zip')) + + if self.mode == 'gtFine': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '_trainvaltest.zip')) + elif self.mode == 'gtCoarse': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '.zip')) + + if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip): + extract_archive(from_path=image_dir_zip, to_path=self.root) + extract_archive(from_path=target_dir_zip, to_path=self.root) + else: + raise RuntimeError('Dataset not found or incomplete. 
Please make sure all required folders for the' + ' specified "split" and "mode" are inside the "root" directory') + + for city in os.listdir(self.images_dir): + img_dir = os.path.join(self.images_dir, city) + target_dir = os.path.join(self.targets_dir, city) + for file_name in os.listdir(img_dir): + target_types = [] + for t in self.target_type: + target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0], + self._get_target_suffix(self.mode, t)) + target_types.append(os.path.join(target_dir, target_name)) + + self.images.append(os.path.join(img_dir, file_name)) + self.targets.append(target_types) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + Returns: + tuple: (image, target) where target is a tuple of all target types if target_type is a list with more + than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation. + """ + + image = Image.open(self.images[index]).convert('RGB') + + targets: Any = [] + for i, t in enumerate(self.target_type): + if t == 'polygon': + target = self._load_json(self.targets[index][i]) + else: + target = Image.open(self.targets[index][i]) + + targets.append(target) + + target = tuple(targets) if len(targets) > 1 else targets[0] + + if self.transforms is not None: + image, target = self.transforms(image, target) + + return image, target
+ + def __len__(self) -> int: + return len(self.images) + + def extra_repr(self) -> str: + lines = ["Split: {split}", "Mode: {mode}", "Type: {target_type}"] + return '\n'.join(lines).format(**self.__dict__) + + def _load_json(self, path: str) -> Dict[str, Any]: + with open(path, 'r') as file: + data = json.load(file) + return data + + def _get_target_suffix(self, mode: str, target_type: str) -> str: + if target_type == 'instance': + return '{}_instanceIds.png'.format(mode) + elif target_type == 'semantic': + return '{}_labelIds.png'.format(mode) + elif target_type == 'color': + return '{}_color.png'.format(mode) + else: + return '{}_polygons.json'.format(mode)
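Beyond the docstring examples above, the ``classes`` table can be used to build a colour palette for visualising ``semantic`` targets; a small sketch (skipping the ignore ids is a choice, not part of the listing):

.. code-block:: python

    from torchvision.datasets import Cityscapes

    # Map train_id -> RGB colour, leaving out the ignore ids (255 and -1).
    palette = {c.train_id: c.color
               for c in Cityscapes.classes
               if c.train_id not in (255, -1)}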
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/coco.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/coco.html
new file mode 100644
index 000000000000..5a9601ad5b7c
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/coco.html
@@ -0,0 +1,690 @@
+torchvision.datasets.coco — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.coco

+from .vision import VisionDataset
+from PIL import Image
+import os
+import os.path
+from typing import Any, Callable, Optional, Tuple
+
+
+
[docs]class CocoCaptions(VisionDataset): + """`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Example: + + .. code:: python + + import torchvision.datasets as dset + import torchvision.transforms as transforms + cap = dset.CocoCaptions(root = 'dir where images are', + annFile = 'json annotation file', + transform=transforms.ToTensor()) + + print('Number of samples: ', len(cap)) + img, target = cap[3] # load 4th sample + + print("Image Size: ", img.size()) + print(target) + + Output: :: + + Number of samples: 82783 + Image Size: (3L, 427L, 640L) + [u'A plane emitting smoke stream flying over a mountain.', + u'A plane darts across a bright blue sky behind a mountain covered in snow', + u'A plane leaves a contrail above the snowy mountain top.', + u'A mountain that has a plane flying overheard in the distance.', + u'A mountain view with a plume of smoke in the background'] + + """ + + def __init__( + self, + root: str, + annFile: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ) -> None: + super(CocoCaptions, self).__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + anns = coco.loadAnns(ann_ids) + target = [ann['caption'] for ann in anns] + + path = coco.loadImgs(img_id)[0]['file_name'] + + img = Image.open(os.path.join(self.root, path)).convert('RGB') + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
+ + def __len__(self) -> int: + return len(self.ids)
+ + +
[docs]class CocoDetection(VisionDataset): + """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__( + self, + root: str, + annFile: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ) -> None: + super(CocoDetection, self).__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + target = coco.loadAnns(ann_ids) + + path = coco.loadImgs(img_id)[0]['file_name'] + + img = Image.open(os.path.join(self.root, path)).convert('RGB') + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
+ + def __len__(self) -> int: + return len(self.ids)
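A minimal sketch for ``CocoDetection``, mirroring the ``CocoCaptions`` example in the docstring above; both paths are assumptions and ``pycocotools`` must be installed:

.. code-block:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    det = dset.CocoDetection(root='dir where images are',
                             annFile='json annotation file',
                             transform=transforms.ToTensor())
    img, anns = det[0]   # anns: list of annotation dicts from coco.loadAnns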
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/fakedata.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/fakedata.html
new file mode 100644
index 000000000000..5be43d866bb5
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/fakedata.html
@@ -0,0 +1,618 @@
+torchvision.datasets.fakedata — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.fakedata

+import torch
+from typing import Any, Callable, Optional, Tuple
+from .vision import VisionDataset
+from .. import transforms
+
+
+
[docs]class FakeData(VisionDataset): + """A fake dataset that returns randomly generated images and returns them as PIL images + + Args: + size (int, optional): Size of the dataset. Default: 1000 images + image_size(tuple, optional): Size if the returned images. Default: (3, 224, 224) + num_classes(int, optional): Number of classes in the datset. Default: 10 + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + random_offset (int): Offsets the index-based random seed used to + generate each image. Default: 0 + + """ + + def __init__( + self, + size: int = 1000, + image_size: Tuple[int, int, int] = (3, 224, 224), + num_classes: int = 10, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + random_offset: int = 0, + ) -> None: + super(FakeData, self).__init__(None, transform=transform, # type: ignore[arg-type] + target_transform=target_transform) + self.size = size + self.num_classes = num_classes + self.image_size = image_size + self.random_offset = random_offset + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is class_index of the target class. + """ + # create random image that is consistent with the index id + if index >= len(self): + raise IndexError("{} index out of range".format(self.__class__.__name__)) + rng_state = torch.get_rng_state() + torch.manual_seed(index + self.random_offset) + img = torch.randn(*self.image_size) + target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0] + torch.set_rng_state(rng_state) + + # convert to PIL Image + img = transforms.ToPILImage()(img) + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return self.size
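A minimal sketch of the class above, e.g. for smoke-testing a training loop without any files on disk:

.. code-block:: python

    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    fake = datasets.FakeData(size=100, image_size=(3, 224, 224), num_classes=10,
                             transform=transforms.ToTensor())
    imgs, targets = next(iter(DataLoader(fake, batch_size=8)))   # imgs: [8, 3, 224, 224]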
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/flickr.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/flickr.html
new file mode 100644
index 000000000000..b67d4e087f78
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/flickr.html
@@ -0,0 +1,719 @@
+torchvision.datasets.flickr — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.flickr

+from collections import defaultdict
+from PIL import Image
+from html.parser import HTMLParser
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import glob
+import os
+from .vision import VisionDataset
+
+
+class Flickr8kParser(HTMLParser):
+    """Parser for extracting captions from the Flickr8k dataset web page."""
+
+    def __init__(self, root: str) -> None:
+        super(Flickr8kParser, self).__init__()
+
+        self.root = root
+
+        # Data structure to store captions
+        self.annotations: Dict[str, List[str]] = {}
+
+        # State variables
+        self.in_table = False
+        self.current_tag: Optional[str] = None
+        self.current_img: Optional[str] = None
+
+    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
+        self.current_tag = tag
+
+        if tag == 'table':
+            self.in_table = True
+
+    def handle_endtag(self, tag: str) -> None:
+        self.current_tag = None
+
+        if tag == 'table':
+            self.in_table = False
+
+    def handle_data(self, data: str) -> None:
+        if self.in_table:
+            if data == 'Image Not Found':
+                self.current_img = None
+            elif self.current_tag == 'a':
+                img_id = data.split('/')[-2]
+                img_id = os.path.join(self.root, img_id + '_*.jpg')
+                img_id = glob.glob(img_id)[0]
+                self.current_img = img_id
+                self.annotations[img_id] = []
+            elif self.current_tag == 'li' and self.current_img:
+                img_id = self.current_img
+                self.annotations[img_id].append(data.strip())
+
+
+
[docs]class Flickr8k(VisionDataset): + """`Flickr8k Entities <http://hockenmaier.cs.illinois.edu/8k-pictures.html>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + ann_file (string): Path to annotation file. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + def __init__( + self, + root: str, + ann_file: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(Flickr8k, self).__init__(root, transform=transform, + target_transform=target_transform) + self.ann_file = os.path.expanduser(ann_file) + + # Read annotations and store in a dict + parser = Flickr8kParser(self.root) + with open(self.ann_file) as fh: + parser.feed(fh.read()) + self.annotations = parser.annotations + + self.ids = list(sorted(self.annotations.keys())) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + img_id = self.ids[index] + + # Image + img = Image.open(img_id).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + # Captions + target = self.annotations[img_id] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return len(self.ids)
+ + +
[docs]class Flickr30k(VisionDataset): + """`Flickr30k Entities <http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + ann_file (string): Path to annotation file. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + def __init__( + self, + root: str, + ann_file: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(Flickr30k, self).__init__(root, transform=transform, + target_transform=target_transform) + self.ann_file = os.path.expanduser(ann_file) + + # Read annotations and store in a dict + self.annotations = defaultdict(list) + with open(self.ann_file) as fh: + for line in fh: + img_id, caption = line.strip().split('\t') + self.annotations[img_id[:-2]].append(caption) + + self.ids = list(sorted(self.annotations.keys())) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + img_id = self.ids[index] + + # Image + filename = os.path.join(self.root, img_id) + img = Image.open(filename).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + # Captions + target = self.annotations[img_id] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return len(self.ids)
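A minimal sketch for the two classes above; both paths are assumptions, since neither the images nor the annotation file are downloaded by these datasets:

.. code-block:: python

    from torchvision import datasets

    flickr = datasets.Flickr30k(root='dir where images are',
                                ann_file='path/to/annotations.token')
    img, captions = flickr[0]   # captions: list of strings for this image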
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/folder.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/folder.html
new file mode 100644
index 000000000000..2588a1e35ac1
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/folder.html
@@ -0,0 +1,782 @@
+torchvision.datasets.folder — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.folder

+from .vision import VisionDataset
+
+from PIL import Image
+
+import os
+import os.path
+from typing import Any, Callable, cast, Dict, List, Optional, Tuple
+
+
+def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
+    """Checks if a file is an allowed extension.
+
+    Args:
+        filename (string): path to a file
+        extensions (tuple of strings): extensions to consider (lowercase)
+
+    Returns:
+        bool: True if the filename ends with one of given extensions
+    """
+    return filename.lower().endswith(extensions)
+
+
+def is_image_file(filename: str) -> bool:
+    """Checks if a file is an allowed image extension.
+
+    Args:
+        filename (string): path to a file
+
+    Returns:
+        bool: True if the filename ends with a known image extension
+    """
+    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
+
+
+def make_dataset(
+    directory: str,
+    class_to_idx: Dict[str, int],
+    extensions: Optional[Tuple[str, ...]] = None,
+    is_valid_file: Optional[Callable[[str], bool]] = None,
+) -> List[Tuple[str, int]]:
+    instances = []
+    directory = os.path.expanduser(directory)
+    both_none = extensions is None and is_valid_file is None
+    both_something = extensions is not None and is_valid_file is not None
+    if both_none or both_something:
+        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
+    if extensions is not None:
+        def is_valid_file(x: str) -> bool:
+            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
+    is_valid_file = cast(Callable[[str], bool], is_valid_file)
+    for target_class in sorted(class_to_idx.keys()):
+        class_index = class_to_idx[target_class]
+        target_dir = os.path.join(directory, target_class)
+        if not os.path.isdir(target_dir):
+            continue
+        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
+            for fname in sorted(fnames):
+                path = os.path.join(root, fname)
+                if is_valid_file(path):
+                    item = path, class_index
+                    instances.append(item)
+    return instances
+
+
+
[docs]class DatasetFolder(VisionDataset): + """A generic data loader where the samples are arranged in this way: :: + + root/class_x/xxx.ext + root/class_x/xxy.ext + root/class_x/xxz.ext + + root/class_y/123.ext + root/class_y/nsdf3.ext + root/class_y/asd932_.ext + + Args: + root (string): Root directory path. + loader (callable): A function to load a sample given its path. + extensions (tuple[string]): A list of allowed extensions. + both extensions and is_valid_file should not be passed. + transform (callable, optional): A function/transform that takes in + a sample and returns a transformed version. + E.g, ``transforms.RandomCrop`` for images. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + is_valid_file (callable, optional): A function that takes path of a file + and check if the file is a valid file (used to check of corrupt files) + both extensions and is_valid_file should not be passed. + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + samples (list): List of (sample path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__( + self, + root: str, + loader: Callable[[str], Any], + extensions: Optional[Tuple[str, ...]] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> None: + super(DatasetFolder, self).__init__(root, transform=transform, + target_transform=target_transform) + classes, class_to_idx = self._find_classes(self.root) + samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) + if len(samples) == 0: + msg = "Found 0 files in subfolders of: {}\n".format(self.root) + if extensions is not None: + msg += "Supported extensions are: {}".format(",".join(extensions)) + raise RuntimeError(msg) + + self.loader = loader + self.extensions = extensions + + self.classes = classes + self.class_to_idx = class_to_idx + self.samples = samples + self.targets = [s[1] for s in samples] + + def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]: + """ + Finds the class folders in a dataset. + + Args: + dir (string): Root directory path. + + Returns: + tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary. + + Ensures: + No class is a subdirectory of another. + """ + classes = [d.name for d in os.scandir(dir) if d.is_dir()] + classes.sort() + class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} + return classes, class_to_idx + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (sample, target) where target is class_index of the target class. + """ + path, target = self.samples[index] + sample = self.loader(path) + if self.transform is not None: + sample = self.transform(sample) + if self.target_transform is not None: + target = self.target_transform(target) + + return sample, target
+ + def __len__(self) -> int: + return len(self.samples)
+ + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') + + +def pil_loader(path: str) -> Image.Image: + # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) + with open(path, 'rb') as f: + img = Image.open(f) + return img.convert('RGB') + + +# TODO: specify the return type +def accimage_loader(path: str) -> Any: + import accimage + try: + return accimage.Image(path) + except IOError: + # Potentially a decoding problem, fall back to PIL.Image + return pil_loader(path) + + +def default_loader(path: str) -> Any: + from torchvision import get_image_backend + if get_image_backend() == 'accimage': + return accimage_loader(path) + else: + return pil_loader(path) + + +
[docs]class ImageFolder(DatasetFolder): + """A generic data loader where the images are arranged in this way: :: + + root/dog/xxx.png + root/dog/xxy.png + root/dog/xxz.png + + root/cat/123.png + root/cat/nsdf3.png + root/cat/asd932_.png + + Args: + root (string): Root directory path. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + is_valid_file (callable, optional): A function that takes path of an Image file + and check if the file is a valid file (used to check of corrupt files) + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + imgs (list): List of (image path, class_index) tuples + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + loader: Callable[[str], Any] = default_loader, + is_valid_file: Optional[Callable[[str], bool]] = None, + ): + super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None, + transform=transform, + target_transform=target_transform, + is_valid_file=is_valid_file) + self.imgs = self.samples
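A minimal sketch of ``ImageFolder`` on the ``root/<class>/<image>`` layout described above; the path is an assumption:

.. code-block:: python

    from torchvision import datasets, transforms

    dataset = datasets.ImageFolder(root='path/to/train',
                                   transform=transforms.ToTensor())
    print(dataset.classes)            # class names, sorted alphabetically
    sample, class_index = dataset[0]  # loaded with the default PIL loader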
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/hmdb51.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/hmdb51.html
new file mode 100644
index 000000000000..06d31d44ad38
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/hmdb51.html
@@ -0,0 +1,682 @@
+torchvision.datasets.hmdb51 — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.hmdb51

+import glob
+import os
+
+from .utils import list_dir
+from .folder import make_dataset
+from .video_utils import VideoClips
+from .vision import VisionDataset
+
+
+
[docs]class HMDB51(VisionDataset): + """ + `HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_ + dataset. + + HMDB51 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the HMDB51 Dataset. + annotation_path (str): Path to the folder containing the split files. + frames_per_clip (int): Number of frames in a clip. + step_between_clips (int): Number of frames between each clip. + fold (int, optional): Which fold to use. Should be between 1 and 3. + train (bool, optional): If ``True``, creates a dataset from the train split, + otherwise from the ``test`` split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. + + Returns: + video (Tensor[T, H, W, C]): the `T` video frames + audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + label (int): class of the video clip + """ + + data_url = "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar" + splits = { + "url": "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar", + "md5": "15e67781e70dcfbdce2d7dbb9b3344b5" + } + TRAIN_TAG = 1 + TEST_TAG = 2 + + def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1, + frame_rate=None, fold=1, train=True, transform=None, + _precomputed_metadata=None, num_workers=1, _video_width=0, + _video_height=0, _video_min_dimension=0, _audio_samples=0): + super(HMDB51, self).__init__(root) + if fold not in (1, 2, 3): + raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + + extensions = ('avi',) + classes = sorted(list_dir(root)) + class_to_idx = {class_: i for (i, class_) in enumerate(classes)} + self.samples = make_dataset( + self.root, + class_to_idx, + extensions, + ) + + video_paths = [path for (path, _) in self.samples] + video_clips = VideoClips( + video_paths, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + ) + self.fold = fold + self.train = train + self.classes = classes + self.video_clips_metadata = video_clips.metadata + self.indices = self._select_fold(video_paths, annotation_path, fold, train) + self.video_clips = video_clips.subset(self.indices) + self.transform = transform + + @property + def metadata(self): + return self.video_clips_metadata + + def _select_fold(self, video_list, annotations_dir, fold, train): + target_tag = self.TRAIN_TAG if train else self.TEST_TAG + split_pattern_name = "*test_split{}.txt".format(fold) + split_pattern_path = os.path.join(annotations_dir, split_pattern_name) + annotation_paths = glob.glob(split_pattern_path) + selected_files = [] + for 
filepath in annotation_paths: + with open(filepath) as fid: + lines = fid.readlines() + for line in lines: + video_filename, tag_string = line.split() + tag = int(tag_string) + if tag == target_tag: + selected_files.append(video_filename) + selected_files = set(selected_files) + + indices = [] + for video_index, video_path in enumerate(video_list): + if os.path.basename(video_path) in selected_files: + indices.append(video_index) + + return indices + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, _, video_idx = self.video_clips.get_clip(idx) + sample_index = self.indices[video_idx] + _, class_index = self.samples[sample_index] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, class_index
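
As the docstring above notes, indexing is clip-based: ``__len__`` returns the number of clips rather than the number of videos, and each item is a ``(video, audio, label)`` triple. A minimal usage sketch of the class above; the directory paths are placeholders, not part of the source, and the HMDB51 videos and split files must already be extracted there:

    from torchvision.datasets import HMDB51

    # Hypothetical paths: `root` holds one sub-folder of .avi files per action class,
    # `annotation_path` holds the official *_test_split{1,2,3}.txt files.
    train_set = HMDB51(
        root="data/hmdb51/videos",
        annotation_path="data/hmdb51/splits",
        frames_per_clip=16,
        step_between_clips=16,
        fold=1,
        train=True,
    )

    print(len(train_set))                 # number of 16-frame clips in fold 1's train split
    video, audio, label = train_set[0]    # video: Tensor[T, H, W, C], label: int
    print(video.shape, label)
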
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/imagenet.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/imagenet.html new file mode 100644 index 000000000000..6dd044ca58a1 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/imagenet.html @@ -0,0 +1,773 @@
torchvision.datasets.imagenet — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.imagenet

+import warnings
+from contextlib import contextmanager
+import os
+import shutil
+import tempfile
+from typing import Any, Dict, List, Iterator, Optional, Tuple
+import torch
+from .folder import ImageFolder
+from .utils import check_integrity, extract_archive, verify_str_arg
+
+ARCHIVE_META = {
+    'train': ('ILSVRC2012_img_train.tar', '1d675b47d978889d74fa0da5fadfb00e'),
+    'val': ('ILSVRC2012_img_val.tar', '29b22e2961454d5413ddabcf34fc5622'),
+    'devkit': ('ILSVRC2012_devkit_t12.tar.gz', 'fa75699e90414af021442c21a62c3abf')
+}
+
+META_FILE = "meta.bin"
+
+
+
[docs]class ImageNet(ImageFolder): + """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset. + + Args: + root (string): Root directory of the ImageNet Dataset. + split (string, optional): The dataset split, supports ``train``, or ``val``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + + Attributes: + classes (list): List of the class name tuples. + class_to_idx (dict): Dict with items (class_name, class_index). + wnids (list): List of the WordNet IDs. + wnid_to_idx (dict): Dict with items (wordnet_id, class_index). + imgs (list): List of (image path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__(self, root: str, split: str = 'train', download: Optional[str] = None, **kwargs: Any) -> None: + if download is True: + msg = ("The dataset is no longer publicly accessible. You need to " + "download the archives externally and place them in the root " + "directory.") + raise RuntimeError(msg) + elif download is False: + msg = ("The use of the download flag is deprecated, since the dataset " + "is no longer publicly accessible.") + warnings.warn(msg, RuntimeWarning) + + root = self.root = os.path.expanduser(root) + self.split = verify_str_arg(split, "split", ("train", "val")) + + self.parse_archives() + wnid_to_classes = load_meta_file(self.root)[0] + + super(ImageNet, self).__init__(self.split_folder, **kwargs) + self.root = root + + self.wnids = self.classes + self.wnid_to_idx = self.class_to_idx + self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] + self.class_to_idx = {cls: idx + for idx, clss in enumerate(self.classes) + for cls in clss} + + def parse_archives(self) -> None: + if not check_integrity(os.path.join(self.root, META_FILE)): + parse_devkit_archive(self.root) + + if not os.path.isdir(self.split_folder): + if self.split == 'train': + parse_train_archive(self.root) + elif self.split == 'val': + parse_val_archive(self.root) + + @property + def split_folder(self) -> str: + return os.path.join(self.root, self.split) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__)
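
A usage sketch for the class above, assuming the ILSVRC2012 archives have already been placed in a hypothetical root directory; on first construction ``parse_archives()`` unpacks them and writes ``meta.bin``:

    from torchvision import transforms
    from torchvision.datasets import ImageNet

    val_set = ImageNet(
        root="data/imagenet",        # placeholder path containing the official archives
        split="val",
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
        ]),
    )
    img, target = val_set[0]
    print(val_set.classes[target])   # tuple of human-readable names for that WordNet ID

Note that ``classes[target]`` is a tuple because several synonyms can map to a single WordNet ID.
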
+ + +def load_meta_file(root: str, file: Optional[str] = None) -> Tuple[Dict[str, str], List[str]]: + if file is None: + file = META_FILE + file = os.path.join(root, file) + + if check_integrity(file): + return torch.load(file) + else: + msg = ("The meta file {} is not present in the root directory or is corrupted. " + "This file is automatically created by the ImageNet dataset.") + raise RuntimeError(msg.format(file, root)) + + +def _verify_archive(root: str, file: str, md5: str) -> None: + if not check_integrity(os.path.join(root, file), md5): + msg = ("The archive {} is not present in the root directory or is corrupted. " + "You need to download it externally and place it in {}.") + raise RuntimeError(msg.format(file, root)) + + +def parse_devkit_archive(root: str, file: Optional[str] = None) -> None: + """Parse the devkit archive of the ImageNet2012 classification dataset and save + the meta information in a binary file. + + Args: + root (str): Root directory containing the devkit archive + file (str, optional): Name of devkit archive. Defaults to + 'ILSVRC2012_devkit_t12.tar.gz' + """ + import scipy.io as sio + + def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, str]]: + metafile = os.path.join(devkit_root, "data", "meta.mat") + meta = sio.loadmat(metafile, squeeze_me=True)['synsets'] + nums_children = list(zip(*meta))[4] + meta = [meta[idx] for idx, num_children in enumerate(nums_children) + if num_children == 0] + idcs, wnids, classes = list(zip(*meta))[:3] + classes = [tuple(clss.split(', ')) for clss in classes] + idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)} + wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)} + return idx_to_wnid, wnid_to_classes + + def parse_val_groundtruth_txt(devkit_root: str) -> List[int]: + file = os.path.join(devkit_root, "data", + "ILSVRC2012_validation_ground_truth.txt") + with open(file, 'r') as txtfh: + val_idcs = txtfh.readlines() + return [int(val_idx) for val_idx in val_idcs] + + @contextmanager + def get_tmp_dir() -> Iterator[str]: + tmp_dir = tempfile.mkdtemp() + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + + archive_meta = ARCHIVE_META["devkit"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + with get_tmp_dir() as tmp_dir: + extract_archive(os.path.join(root, file), tmp_dir) + + devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12") + idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root) + val_idcs = parse_val_groundtruth_txt(devkit_root) + val_wnids = [idx_to_wnid[idx] for idx in val_idcs] + + torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE)) + + +def parse_train_archive(root: str, file: Optional[str] = None, folder: str = "train") -> None: + """Parse the train images archive of the ImageNet2012 classification dataset and + prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the train images archive + file (str, optional): Name of train images archive. Defaults to + 'ILSVRC2012_img_train.tar' + folder (str, optional): Optional name for train images folder. 
Defaults to + 'train' + """ + archive_meta = ARCHIVE_META["train"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + train_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), train_root) + + archives = [os.path.join(train_root, archive) for archive in os.listdir(train_root)] + for archive in archives: + extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True) + + +def parse_val_archive( + root: str, file: Optional[str] = None, wnids: Optional[List[str]] = None, folder: str = "val" +) -> None: + """Parse the validation images archive of the ImageNet2012 classification dataset + and prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the validation images archive + file (str, optional): Name of validation images archive. Defaults to + 'ILSVRC2012_img_val.tar' + wnids (list, optional): List of WordNet IDs of the validation images. If None + is given, the IDs are loaded from the meta file in the root directory + folder (str, optional): Optional name for validation images folder. Defaults to + 'val' + """ + archive_meta = ARCHIVE_META["val"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + if wnids is None: + wnids = load_meta_file(root)[1] + + _verify_archive(root, file, md5) + + val_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), val_root) + + images = sorted([os.path.join(val_root, image) for image in os.listdir(val_root)]) + + for wnid in set(wnids): + os.mkdir(os.path.join(val_root, wnid)) + + for wnid, img_file in zip(wnids, images): + shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file))) +
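
The module-level helpers above can also be run on their own to prepare the directory layout without constructing the dataset; a sketch under the same placeholder root, assuming all three official archives are present:

    from torchvision.datasets.imagenet import (
        parse_devkit_archive, parse_train_archive, parse_val_archive)

    root = "data/imagenet"       # placeholder; must contain the three official archives
    parse_devkit_archive(root)   # writes meta.bin from the devkit
    parse_train_archive(root)    # unpacks train/ into one folder per WordNet ID
    parse_val_archive(root)      # unpacks val/ and sorts images into WordNet ID folders
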
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/kinetics.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/kinetics.html new file mode 100644 index 000000000000..b17d26e79911 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/kinetics.html @@ -0,0 +1,631 @@
torchvision.datasets.kinetics — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.kinetics

+from .utils import list_dir
+from .folder import make_dataset
+from .video_utils import VideoClips
+from .vision import VisionDataset
+
+
+
[docs]class Kinetics400(VisionDataset): + """ + `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_ + dataset. + + Kinetics-400 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the Kinetics-400 Dataset. + frames_per_clip (int): number of frames in a clip + step_between_clips (int): number of frames between each clip + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. + + Returns: + video (Tensor[T, H, W, C]): the `T` video frames + audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + label (int): class of the video clip + """ + + def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None, + extensions=('avi',), transform=None, _precomputed_metadata=None, + num_workers=1, _video_width=0, _video_height=0, + _video_min_dimension=0, _audio_samples=0, _audio_channels=0): + super(Kinetics400, self).__init__(root) + + classes = list(sorted(list_dir(root))) + class_to_idx = {classes[i]: i for i in range(len(classes))} + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + self.classes = classes + video_list = [x[0] for x in self.samples] + self.video_clips = VideoClips( + video_list, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + _audio_channels=_audio_channels, + ) + self.transform = transform + + @property + def metadata(self): + return self.video_clips.metadata + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[video_idx][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label
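
A minimal sketch of constructing the class above; the folder layout (one sub-folder per action class containing ``.avi`` clips) and the path are assumptions, not part of the source:

    from torchvision.datasets import Kinetics400

    train_set = Kinetics400(
        root="data/kinetics400/train",   # e.g. data/kinetics400/train/abseiling/clip0001.avi
        frames_per_clip=32,
        step_between_clips=32,
        num_workers=4,                   # used when computing the clip metadata
    )
    video, audio, label = train_set[0]
    print(video.shape, train_set.classes[label])
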
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/lsun.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/lsun.html new file mode 100644 index 000000000000..003f054d8aac --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/lsun.html @@ -0,0 +1,715 @@
torchvision.datasets.lsun — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.lsun

+from .vision import VisionDataset
+from PIL import Image
+import os
+import os.path
+import io
+import string
+from collections.abc import Iterable
+import pickle
+from typing import Any, Callable, cast, List, Optional, Tuple, Union
+from .utils import verify_str_arg, iterable_to_str
+
+
+class LSUNClass(VisionDataset):
+    def __init__(
+            self, root: str, transform: Optional[Callable] = None,
+            target_transform: Optional[Callable] = None
+    ) -> None:
+        import lmdb
+        super(LSUNClass, self).__init__(root, transform=transform,
+                                        target_transform=target_transform)
+
+        self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False,
+                             readahead=False, meminit=False)
+        with self.env.begin(write=False) as txn:
+            self.length = txn.stat()['entries']
+        cache_file = '_cache_' + ''.join(c for c in root if c in string.ascii_letters)
+        if os.path.isfile(cache_file):
+            self.keys = pickle.load(open(cache_file, "rb"))
+        else:
+            with self.env.begin(write=False) as txn:
+                self.keys = [key for key in txn.cursor().iternext(keys=True, values=False)]
+            pickle.dump(self.keys, open(cache_file, "wb"))
+
+    def __getitem__(self, index: int) -> Tuple[Any, Any]:
+        img, target = None, None
+        env = self.env
+        with env.begin(write=False) as txn:
+            imgbuf = txn.get(self.keys[index])
+
+        buf = io.BytesIO()
+        buf.write(imgbuf)
+        buf.seek(0)
+        img = Image.open(buf).convert('RGB')
+
+        if self.transform is not None:
+            img = self.transform(img)
+
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+    def __len__(self) -> int:
+        return self.length
+
+
+
[docs]class LSUN(VisionDataset): + """ + `LSUN <https://www.yf.io/p/lsun>`_ dataset. + + Args: + root (string): Root directory for the database files. + classes (string or list): One of {'train', 'val', 'test'} or a list of + categories to load. e,g. ['bedroom_train', 'church_outdoor_train']. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + def __init__( + self, + root: str, + classes: Union[str, List[str]] = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(LSUN, self).__init__(root, transform=transform, + target_transform=target_transform) + self.classes = self._verify_classes(classes) + + # for each class, create an LSUNClassDataset + self.dbs = [] + for c in self.classes: + self.dbs.append(LSUNClass( + root=root + '/' + c + '_lmdb', + transform=transform)) + + self.indices = [] + count = 0 + for db in self.dbs: + count += len(db) + self.indices.append(count) + + self.length = count + + def _verify_classes(self, classes: Union[str, List[str]]) -> List[str]: + categories = ['bedroom', 'bridge', 'church_outdoor', 'classroom', + 'conference_room', 'dining_room', 'kitchen', + 'living_room', 'restaurant', 'tower'] + dset_opts = ['train', 'val', 'test'] + + try: + classes = cast(str, classes) + verify_str_arg(classes, "classes", dset_opts) + if classes == 'test': + classes = [classes] + else: + classes = [c + '_' + classes for c in categories] + except ValueError: + if not isinstance(classes, Iterable): + msg = ("Expected type str or Iterable for argument classes, " + "but got type {}.") + raise ValueError(msg.format(type(classes))) + + classes = list(classes) + msg_fmtstr_type = ("Expected type str for elements in argument classes, " + "but got type {}.") + for c in classes: + verify_str_arg(c, custom_msg=msg_fmtstr_type.format(type(c))) + c_short = c.split('_') + category, dset_opt = '_'.join(c_short[:-1]), c_short[-1] + + msg_fmtstr = "Unknown value '{}' for {}. Valid values are {{{}}}." + msg = msg_fmtstr.format(category, "LSUN class", + iterable_to_str(categories)) + verify_str_arg(category, valid_values=categories, custom_msg=msg) + + msg = msg_fmtstr.format(dset_opt, "postfix", iterable_to_str(dset_opts)) + verify_str_arg(dset_opt, valid_values=dset_opts, custom_msg=msg) + + return classes + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target) where target is the index of the target category. + """ + target = 0 + sub = 0 + for ind in self.indices: + if index < ind: + break + target += 1 + sub = ind + + db = self.dbs[target] + index = index - sub + + if self.target_transform is not None: + target = self.target_transform(target) + + img, _ = db[index] + return img, target
+ + def __len__(self) -> int: + return self.length + + def extra_repr(self) -> str: + return "Classes: {classes}".format(**self.__dict__)
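
A sketch of the class above; each requested category must already exist as an LMDB directory under ``root``. The paths are placeholders, and the ``lmdb`` package must be installed:

    from torchvision.datasets import LSUN

    ds = LSUN(
        root="data/lsun",                # expects e.g. data/lsun/bedroom_train_lmdb
        classes=["bedroom_train", "church_outdoor_train"],
    )
    img, target = ds[0]                  # target indexes into ds.classes
    print(img.size, ds.classes[target])
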
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/mnist.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/mnist.html new file mode 100644 index 000000000000..0c679072f167 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/mnist.html @@ -0,0 +1,1048 @@
torchvision.datasets.mnist — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.mnist

+from .vision import VisionDataset
+import warnings
+from PIL import Image
+import os
+import os.path
+import numpy as np
+import torch
+import codecs
+import string
+import gzip
+import lzma
+from typing import Any, Callable, Dict, IO, List, Optional, Tuple, Union
+from .utils import download_url, download_and_extract_archive, extract_archive, \
+    verify_str_arg
+
+
+
[docs]class MNIST(VisionDataset): + """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``MNIST/processed/training.pt`` + and ``MNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + resources = [ + ("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"), + ("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"), + ("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"), + ("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c") + ] + + training_file = 'training.pt' + test_file = 'test.pt' + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + @property + def train_labels(self): + warnings.warn("train_labels has been renamed targets") + return self.targets + + @property + def test_labels(self): + warnings.warn("test_labels has been renamed targets") + return self.targets + + @property + def train_data(self): + warnings.warn("train_data has been renamed data") + return self.data + + @property + def test_data(self): + warnings.warn("test_data has been renamed data") + return self.data + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(MNIST, self).__init__(root, transform=transform, + target_transform=target_transform) + self.train = train # training set or test set + + if download: + self.download() + + if not self._check_exists(): + raise RuntimeError('Dataset not found.' + + ' You can use download=True to download it') + + if self.train: + data_file = self.training_file + else: + data_file = self.test_file + self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file)) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img.numpy(), mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + @property + def raw_folder(self) -> str: + return os.path.join(self.root, self.__class__.__name__, 'raw') + + @property + def processed_folder(self) -> str: + return os.path.join(self.root, self.__class__.__name__, 'processed') + + @property + def class_to_idx(self) -> Dict[str, int]: + return {_class: i for i, _class in enumerate(self.classes)} + + def _check_exists(self) -> bool: + return (os.path.exists(os.path.join(self.processed_folder, + self.training_file)) and + os.path.exists(os.path.join(self.processed_folder, + self.test_file))) + + def download(self) -> None: + """Download the MNIST data if it doesn't exist in processed_folder already.""" + + if self._check_exists(): + return + + os.makedirs(self.raw_folder, exist_ok=True) + os.makedirs(self.processed_folder, exist_ok=True) + + # download files + for url, md5 in self.resources: + filename = url.rpartition('/')[2] + download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5) + + # process and save as torch files + print('Processing...') + + training_set = ( + read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')), + read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')) + ) + test_set = ( + read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')), + read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')) + ) + with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f: + torch.save(training_set, f) + with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f: + torch.save(test_set, f) + + print('Done!') + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test")
+ + +
[docs]class FashionMNIST(MNIST): + """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``FashionMNIST/processed/training.pt`` + and ``FashionMNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + resources = [ + ("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz", + "8d4fb7e6c68d591d4c3dfef9ec88bf0d"), + ("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz", + "25c81989df183df01b3e8a0aad5dffbe"), + ("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz", + "bef4ecab320f06d8554ea6380940ec79"), + ("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz", + "bb300cfdad3c16e7a12a480ee83cd310") + ] + classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', + 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
+ + +
[docs]class KMNIST(MNIST): + """`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``KMNIST/processed/training.pt`` + and ``KMNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + resources = [ + ("http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-images-idx3-ubyte.gz", "bdb82020997e1d708af4cf47b453dcf7"), + ("http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz", "e144d726b3acfaa3e44228e80efcd344"), + ("http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz", "5c965bf0a639b31b8f53240b1b52f4d7"), + ("http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz", "7320c461ea6c1c855c0b718fb2a4b134") + ] + classes = ['o', 'ki', 'su', 'tsu', 'na', 'ha', 'ma', 'ya', 're', 'wo']
+ + +
[docs]class EMNIST(MNIST): + """`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``EMNIST/processed/training.pt`` + and ``EMNIST/processed/test.pt`` exist. + split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``, + ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies + which one to use. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + # Updated URL from https://www.nist.gov/node/1298471/emnist-dataset since the + # _official_ download link + # https://cloudstor.aarnet.edu.au/plus/s/ZNmuFiuQTqZlu9W/download + # is (currently) unavailable + url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip' + md5 = "58c8d27c78d21e728a6bc7b3cc06412e" + splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist') + # Merged Classes assumes Same structure for both uppercase and lowercase version + _merged_classes = set(['C', 'I', 'J', 'K', 'L', 'M', 'O', 'P', 'S', 'U', 'V', 'W', 'X', 'Y', 'Z']) + _all_classes = set(list(string.digits + string.ascii_letters)) + classes_split_dict = { + 'byclass': list(_all_classes), + 'bymerge': sorted(list(_all_classes - _merged_classes)), + 'balanced': sorted(list(_all_classes - _merged_classes)), + 'letters': list(string.ascii_lowercase), + 'digits': list(string.digits), + 'mnist': list(string.digits), + } + + def __init__(self, root: str, split: str, **kwargs: Any) -> None: + self.split = verify_str_arg(split, "split", self.splits) + self.training_file = self._training_file(split) + self.test_file = self._test_file(split) + super(EMNIST, self).__init__(root, **kwargs) + self.classes = self.classes_split_dict[self.split] + + @staticmethod + def _training_file(split) -> str: + return 'training_{}.pt'.format(split) + + @staticmethod + def _test_file(split) -> str: + return 'test_{}.pt'.format(split) + + def download(self) -> None: + """Download the EMNIST data if it doesn't exist in processed_folder already.""" + import shutil + + if self._check_exists(): + return + + os.makedirs(self.raw_folder, exist_ok=True) + os.makedirs(self.processed_folder, exist_ok=True) + + # download files + print('Downloading and extracting zip archive') + download_and_extract_archive(self.url, download_root=self.raw_folder, filename="emnist.zip", + remove_finished=True, md5=self.md5) + gzip_folder = os.path.join(self.raw_folder, 'gzip') + for gzip_file in os.listdir(gzip_folder): + if gzip_file.endswith('.gz'): + extract_archive(os.path.join(gzip_folder, gzip_file), gzip_folder) + + # process and save as torch files + for split in self.splits: + print('Processing ' + split) + training_set = ( + read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))), + read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split))) + ) + test_set = ( + read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))), + read_label_file(os.path.join(gzip_folder, 
'emnist-{}-test-labels-idx1-ubyte'.format(split))) + ) + with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f: + torch.save(training_set, f) + with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f: + torch.save(test_set, f) + shutil.rmtree(gzip_folder) + + print('Done!')
+ + +
[docs]class QMNIST(MNIST): + """`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset whose ``processed'' + subdir contains torch binary files with the datasets. + what (string,optional): Can be 'train', 'test', 'test10k', + 'test50k', or 'nist' for respectively the mnist compatible + training set, the 60k qmnist testing set, the 10k qmnist + examples that match the mnist testing set, the 50k + remaining qmnist testing examples, or all the nist + digits. The default is to select 'train' or 'test' + according to the compatibility argument 'train'. + compat (bool,optional): A boolean that says whether the target + for each example is class number (for compatibility with + the MNIST dataloader) or a torch vector containing the + full qmnist information. Default=True. + download (bool, optional): If true, downloads the dataset from + the internet and puts it in root directory. If dataset is + already downloaded, it is not downloaded again. + transform (callable, optional): A function/transform that + takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform + that takes in the target and transforms it. + train (bool,optional,compatibility): When argument 'what' is + not specified, this boolean decides whether to load the + training set ot the testing set. Default: True. + + """ + + subsets = { + 'train': 'train', + 'test': 'test', + 'test10k': 'test', + 'test50k': 'test', + 'nist': 'nist' + } + resources: Dict[str, List[Tuple[str, str]]] = { # type: ignore[assignment] + 'train': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz', + 'ed72d4157d28c017586c42bc6afe6370'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz', + '0058f8dd561b90ffdd0f734c6a30e5e4')], + 'test': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz', + '1394631089c404de565df7b7aeaf9412'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz', + '5b5b05890a5e13444e108efe57b788aa')], + 'nist': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz', + '7f124b3b8ab81486c9d8c2749c17f834'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz', + '5ed0e788978e45d4a8bd4b7caec3d79d')] + } + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + def __init__( + self, root: str, what: Optional[str] = None, compat: bool = True, + train: bool = True, **kwargs: Any + ) -> None: + if what is None: + what = 'train' if train else 'test' + self.what = verify_str_arg(what, "what", tuple(self.subsets.keys())) + self.compat = compat + self.data_file = what + '.pt' + self.training_file = self.data_file + self.test_file = self.data_file + super(QMNIST, self).__init__(root, train, **kwargs) + + def download(self) -> None: + """Download the QMNIST data if it doesn't exist in processed_folder already. + Note that we only download what has been asked for (argument 'what'). 
+ """ + if self._check_exists(): + return + os.makedirs(self.raw_folder, exist_ok=True) + os.makedirs(self.processed_folder, exist_ok=True) + split = self.resources[self.subsets[self.what]] + files = [] + + # download data files if not already there + for url, md5 in split: + filename = url.rpartition('/')[2] + file_path = os.path.join(self.raw_folder, filename) + if not os.path.isfile(file_path): + download_url(url, root=self.raw_folder, filename=filename, md5=md5) + files.append(file_path) + + # process and save as torch files + print('Processing...') + data = read_sn3_pascalvincent_tensor(files[0]) + assert(data.dtype == torch.uint8) + assert(data.ndimension() == 3) + targets = read_sn3_pascalvincent_tensor(files[1]).long() + assert(targets.ndimension() == 2) + if self.what == 'test10k': + data = data[0:10000, :, :].clone() + targets = targets[0:10000, :].clone() + if self.what == 'test50k': + data = data[10000:, :, :].clone() + targets = targets[10000:, :].clone() + with open(os.path.join(self.processed_folder, self.data_file), 'wb') as f: + torch.save((data, targets), f) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + # redefined to handle the compat flag + img, target = self.data[index], self.targets[index] + img = Image.fromarray(img.numpy(), mode='L') + if self.transform is not None: + img = self.transform(img) + if self.compat: + target = int(target[0]) + if self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def extra_repr(self) -> str: + return "Split: {}".format(self.what)
+ + +def get_int(b: bytes) -> int: + return int(codecs.encode(b, 'hex'), 16) + + +def open_maybe_compressed_file(path: Union[str, IO]) -> Union[IO, gzip.GzipFile]: + """Return a file object that possibly decompresses 'path' on the fly. + Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'. + """ + if not isinstance(path, torch._six.string_classes): + return path + if path.endswith('.gz'): + return gzip.open(path, 'rb') + if path.endswith('.xz'): + return lzma.open(path, 'rb') + return open(path, 'rb') + + +SN3_PASCALVINCENT_TYPEMAP = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8') +} + + +def read_sn3_pascalvincent_tensor(path: Union[str, IO], strict: bool = True) -> torch.Tensor: + """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh'). + Argument may be a filename, compressed filename, or file object. + """ + # read + with open_maybe_compressed_file(path) as f: + data = f.read() + # parse + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert nd >= 1 and nd <= 3 + assert ty >= 8 and ty <= 14 + m = SN3_PASCALVINCENT_TYPEMAP[ty] + s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path: str) -> torch.Tensor: + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 1) + return x.long() + + +def read_image_file(path: str) -> torch.Tensor: + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 3) + return x +
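
The classes above share the same constructor; a short sketch covering the base class and the two variants that take extra arguments. The ``"data"`` root is a placeholder, and ``download=True`` fetches the archives on first use:

    from torchvision import transforms
    from torchvision.datasets import MNIST, EMNIST, QMNIST

    tfm = transforms.ToTensor()

    train = MNIST("data", train=True, download=True, transform=tfm)
    img, label = train[0]                 # img: 1x28x28 float tensor, label: int

    # EMNIST additionally requires one of its six splits; QMNIST selects a subset via `what`.
    letters = EMNIST("data", split="letters", download=True, transform=tfm)
    test10k = QMNIST("data", what="test10k", download=True, transform=tfm)
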
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/omniglot.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/omniglot.html new file mode 100644 index 000000000000..9e8fe1a80a13 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/omniglot.html @@ -0,0 +1,650 @@
torchvision.datasets.omniglot — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.omniglot

+from PIL import Image
+from os.path import join
+import os
+from typing import Any, Callable, List, Optional, Tuple
+from .vision import VisionDataset
+from .utils import download_and_extract_archive, check_integrity, list_dir, list_files
+
+
+
[docs]class Omniglot(VisionDataset): + """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset. + Args: + root (string): Root directory of dataset where directory + ``omniglot-py`` exists. + background (bool, optional): If True, creates dataset from the "background" set, otherwise + creates from the "evaluation" set. This terminology is defined by the authors. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset zip files from the internet and + puts it in root directory. If the zip files are already downloaded, they are not + downloaded again. + """ + folder = 'omniglot-py' + download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python' + zips_md5 = { + 'images_background': '68d2efa1b9178cc56df9314c21c6e718', + 'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811' + } + + def __init__( + self, + root: str, + background: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(Omniglot, self).__init__(join(root, self.folder), transform=transform, + target_transform=target_transform) + self.background = background + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + self.target_folder = join(self.root, self._get_target_folder()) + self._alphabets = list_dir(self.target_folder) + self._characters: List[str] = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))] + for a in self._alphabets], []) + self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')] + for idx, character in enumerate(self._characters)] + self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, []) + + def __len__(self) -> int: + return len(self._flat_character_images) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target character class. + """ + image_name, character_class = self._flat_character_images[index] + image_path = join(self.target_folder, self._characters[character_class], image_name) + image = Image.open(image_path, mode='r').convert('L') + + if self.transform: + image = self.transform(image) + + if self.target_transform: + character_class = self.target_transform(character_class) + + return image, character_class + + def _check_integrity(self) -> bool: + zip_filename = self._get_target_folder() + if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + filename = self._get_target_folder() + zip_filename = filename + '.zip' + url = self.download_url_prefix + '/' + zip_filename + download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename]) + + def _get_target_folder(self) -> str: + return 'images_background' if self.background else 'images_evaluation'
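
A sketch of the class above; ``background`` switches between the two sets defined by the dataset authors, and the ``"data"`` root is a placeholder:

    from torchvision.datasets import Omniglot

    background = Omniglot("data", background=True, download=True)
    image, character_class = background[0]   # a greyscale PIL image and a character index
    print(len(background), "images in the background set")
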
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/phototour.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/phototour.html new file mode 100644 index 000000000000..55589fafefab --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/phototour.html @@ -0,0 +1,764 @@
torchvision.datasets.phototour — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.phototour

+import os
+import numpy as np
+from PIL import Image
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import torch
+from .vision import VisionDataset
+
+from .utils import download_url
+
+
+
[docs]class PhotoTour(VisionDataset): + """`Learning Local Image Descriptors Data <http://phototour.cs.washington.edu/patches/default.htm>`_ Dataset. + + + Args: + root (string): Root directory where images are. + name (string): Name of the dataset to load. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + urls = { + 'notredame_harris': [ + 'http://matthewalunbrown.com/patchdata/notredame_harris.zip', + 'notredame_harris.zip', + '69f8c90f78e171349abdf0307afefe4d' + ], + 'yosemite_harris': [ + 'http://matthewalunbrown.com/patchdata/yosemite_harris.zip', + 'yosemite_harris.zip', + 'a73253d1c6fbd3ba2613c45065c00d46' + ], + 'liberty_harris': [ + 'http://matthewalunbrown.com/patchdata/liberty_harris.zip', + 'liberty_harris.zip', + 'c731fcfb3abb4091110d0ae8c7ba182c' + ], + 'notredame': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip', + 'notredame.zip', + '509eda8535847b8c0a90bbb210c83484' + ], + 'yosemite': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip', + 'yosemite.zip', + '533b2e8eb7ede31be40abc317b2fd4f0' + ], + 'liberty': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip', + 'liberty.zip', + 'fdd9152f138ea5ef2091746689176414' + ], + } + means = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437, + 'notredame_harris': 0.4854, 'yosemite_harris': 0.4844, 'liberty_harris': 0.4437} + stds = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019, + 'notredame_harris': 0.1864, 'yosemite_harris': 0.1818, 'liberty_harris': 0.2019} + lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, + 'liberty_harris': 379587, 'yosemite_harris': 450912, 'notredame_harris': 325295} + image_ext = 'bmp' + info_file = 'info.txt' + matches_files = 'm50_100000_100000_0.txt' + + def __init__( + self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False + ) -> None: + super(PhotoTour, self).__init__(root, transform=transform) + self.name = name + self.data_dir = os.path.join(self.root, name) + self.data_down = os.path.join(self.root, '{}.zip'.format(name)) + self.data_file = os.path.join(self.root, '{}.pt'.format(name)) + + self.train = train + self.mean = self.means[name] + self.std = self.stds[name] + + if download: + self.download() + + if not self._check_datafile_exists(): + raise RuntimeError('Dataset not found.' + + ' You can use download=True to download it') + + # load the serialized data + self.data, self.labels, self.matches = torch.load(self.data_file) + +
[docs] def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]: + """ + Args: + index (int): Index + + Returns: + tuple: (data1, data2, matches) + """ + if self.train: + data = self.data[index] + if self.transform is not None: + data = self.transform(data) + return data + m = self.matches[index] + data1, data2 = self.data[m[0]], self.data[m[1]] + if self.transform is not None: + data1 = self.transform(data1) + data2 = self.transform(data2) + return data1, data2, m[2]
+ + def __len__(self) -> int: + if self.train: + return self.lens[self.name] + return len(self.matches) + + def _check_datafile_exists(self) -> bool: + return os.path.exists(self.data_file) + + def _check_downloaded(self) -> bool: + return os.path.exists(self.data_dir) + + def download(self) -> None: + if self._check_datafile_exists(): + print('# Found cached data {}'.format(self.data_file)) + return + + if not self._check_downloaded(): + # download files + url = self.urls[self.name][0] + filename = self.urls[self.name][1] + md5 = self.urls[self.name][2] + fpath = os.path.join(self.root, filename) + + download_url(url, self.root, filename, md5) + + print('# Extracting data {}\n'.format(self.data_down)) + + import zipfile + with zipfile.ZipFile(fpath, 'r') as z: + z.extractall(self.data_dir) + + os.unlink(fpath) + + # process and save as torch files + print('# Caching data {}'.format(self.data_file)) + + dataset = ( + read_image_file(self.data_dir, self.image_ext, self.lens[self.name]), + read_info_file(self.data_dir, self.info_file), + read_matches_files(self.data_dir, self.matches_files) + ) + + with open(self.data_file, 'wb') as f: + torch.save(dataset, f) + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test")
+ + +def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor: + """Return a Tensor containing the patches + """ + + def PIL2array(_img: Image.Image) -> np.ndarray: + """Convert PIL image type to numpy 2D array + """ + return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64) + + def find_files(_data_dir: str, _image_ext: str) -> List[str]: + """Return a list with the file names of the images containing the patches + """ + files = [] + # find those files with the specified extension + for file_dir in os.listdir(_data_dir): + if file_dir.endswith(_image_ext): + files.append(os.path.join(_data_dir, file_dir)) + return sorted(files) # sort files in ascend order to keep relations + + patches = [] + list_files = find_files(data_dir, image_ext) + + for fpath in list_files: + img = Image.open(fpath) + for y in range(0, 1024, 64): + for x in range(0, 1024, 64): + patch = img.crop((x, y, x + 64, y + 64)) + patches.append(PIL2array(patch)) + return torch.ByteTensor(np.array(patches[:n])) + + +def read_info_file(data_dir: str, info_file: str) -> torch.Tensor: + """Return a Tensor containing the list of labels + Read the file and keep only the ID of the 3D point. + """ + labels = [] + with open(os.path.join(data_dir, info_file), 'r') as f: + labels = [int(line.split()[0]) for line in f] + return torch.LongTensor(labels) + + +def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor: + """Return a Tensor containing the ground truth matches + Read the file and keep only 3D point ID. + Matches are represented with a 1, non matches with a 0. + """ + matches = [] + with open(os.path.join(data_dir, matches_file), 'r') as f: + for line in f: + line_split = line.split() + matches.append([int(line_split[0]), int(line_split[3]), + int(line_split[1] == line_split[4])]) + return torch.LongTensor(matches) +
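
A sketch of the class above; ``name`` selects one of the six patch sets listed in ``urls``, the root path is a placeholder, and the archives are fairly large. Note the asymmetric return types: train mode yields single patches, test mode yields labelled pairs:

    from torchvision.datasets import PhotoTour

    train_set = PhotoTour("data/phototour", name="notredame", train=True, download=True)
    patch = train_set[0]                     # a single 64x64 patch tensor

    test_set = PhotoTour("data/phototour", name="notredame", train=False, download=True)
    patch1, patch2, is_match = test_set[0]   # a ground-truth-labelled patch pair
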
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/places365.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/places365.html new file mode 100644 index 000000000000..e0d2e9e11873 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/places365.html @@ -0,0 +1,722 @@
torchvision.datasets.places365 — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.datasets.places365

+import os
+from os import path
+from typing import Any, Callable, Dict, List, Optional, Tuple
+from urllib.parse import urljoin
+
+from .folder import default_loader
+from .utils import verify_str_arg, check_integrity, download_and_extract_archive
+from .vision import VisionDataset
+
+
+
[docs]class Places365(VisionDataset): + r"""`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset. + + Args: + root (string): Root directory of the Places365 dataset. + split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challendge``, + ``val``. + small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the + high resolution ones. + download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already + downloaded archives are not downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + + Attributes: + classes (list): List of the class names. + class_to_idx (dict): Dict with items (class_name, class_index). + imgs (list): List of (image path, class_index) tuples + targets (list): The class_index value for each image in the dataset + + Raises: + RuntimeError: If ``download is False`` and the meta files, i. e. the devkit, are not present or corrupted. + RuntimeError: If ``download is True`` and the image archive is already extracted. + """ + _SPLITS = ("train-standard", "train-challenge", "val") + _BASE_URL = "http://data.csail.mit.edu/places/places365/" + # {variant: (archive, md5)} + _DEVKIT_META = { + "standard": ("filelist_places365-standard.tar", "35a0585fee1fa656440f3ab298f8479c"), + "challenge": ("filelist_places365-challenge.tar", "70a8307e459c3de41690a7c76c931734"), + } + # (file, md5) + _CATEGORIES_META = ("categories_places365.txt", "06c963b85866bd0649f97cb43dd16673") + # {split: (file, md5)} + _FILE_LIST_META = { + "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a"), + "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57"), + "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1"), + } + # {(split, small): (file, md5)} + _IMAGES_META = { + ("train-standard", False): ("train_large_places365standard.tar", "67e186b496a84c929568076ed01a8aa1"), + ("train-challenge", False): ("train_large_places365challenge.tar", "605f18e68e510c82b958664ea134545f"), + ("val", False): ("val_large.tar", "9b71c4993ad89d2d8bcbdc4aef38042f"), + ("train-standard", True): ("train_256_places365standard.tar", "53ca1c756c3d1e7809517cc47c5561c5"), + ("train-challenge", True): ("train_256_places365challenge.tar", "741915038a5e3471ec7332404dfb64ef"), + ("val", True): ("val_256.tar", "e27b17d8d44f4af9a78502beb927f808"), + } + + def __init__( + self, + root: str, + split: str = "train-standard", + small: bool = False, + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + loader: Callable[[str], Any] = default_loader, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + + self.split = self._verify_split(split) + self.small = small + self.loader = loader + + self.classes, self.class_to_idx = self.load_categories(download) + self.imgs, self.targets = self.load_file_list(download) + + if download: + self.download_images() + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + file, target = self.imgs[index] + image = self.loader(file) + + if self.transforms is not None: + image, target = 
self.transforms(image, target) + + return image, target + + def __len__(self) -> int: + return len(self.imgs) + + @property + def variant(self) -> str: + return "challenge" if "challenge" in self.split else "standard" + + @property + def images_dir(self) -> str: + size = "256" if self.small else "large" + if self.split.startswith("train"): + dir = f"data_{size}_{self.variant}" + else: + dir = f"{self.split}_{size}" + return path.join(self.root, dir) + + def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]: + def process(line: str) -> Tuple[str, int]: + cls, idx = line.split() + return cls, int(idx) + + file, md5 = self._CATEGORIES_META + file = path.join(self.root, file) + if not self._check_integrity(file, md5, download): + self.download_devkit() + + with open(file, "r") as fh: + class_to_idx = dict(process(line) for line in fh) + + return sorted(class_to_idx.keys()), class_to_idx + + def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]: + def process(line: str, sep="/") -> Tuple[str, int]: + image, idx = line.split() + return path.join(self.images_dir, image.lstrip(sep).replace(sep, os.sep)), int(idx) + + file, md5 = self._FILE_LIST_META[self.split] + file = path.join(self.root, file) + if not self._check_integrity(file, md5, download): + self.download_devkit() + + with open(file, "r") as fh: + images = [process(line) for line in fh] + + _, targets = zip(*images) + return images, list(targets) + + def download_devkit(self) -> None: + file, md5 = self._DEVKIT_META[self.variant] + download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5) + + def download_images(self) -> None: + if path.exists(self.images_dir): + raise RuntimeError( + f"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, " + f"delete the directory." + ) + + file, md5 = self._IMAGES_META[(self.split, self.small)] + download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5) + + if self.split.startswith("train"): + os.rename(self.images_dir.rsplit("_", 1)[0], self.images_dir) + + def extra_repr(self) -> str: + return "\n".join(("Split: {split}", "Small: {small}")).format(**self.__dict__) + + def _verify_split(self, split: str) -> str: + return verify_str_arg(split, "split", self._SPLITS) + + def _check_integrity(self, file: str, md5: str, download: bool) -> bool: + integrity = check_integrity(path.join(self.root, file), md5=md5) + if not integrity and not download: + raise RuntimeError( + f"The file {file} does not exist or is corrupted. You can set download=True to download it." + ) + return integrity
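For quick reference, a minimal usage sketch for the class above; the root path, split, and transform choice are illustrative assumptions rather than part of the documented source:

# Hypothetical example: load the small (256x256) validation split of Places365.
import torch
from torchvision import transforms
from torchvision.datasets import Places365

places = Places365(
    root="./places365",              # illustrative root directory
    split="val",
    small=True,                      # use the 256x256 images
    download=True,                   # downloads devkit and images; raises if already extracted
    transform=transforms.ToTensor(),
)
image, target = places[0]            # image tensor and class index
loader = torch.utils.data.DataLoader(places, batch_size=32, shuffle=True)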
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbd.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbd.html new file mode 100644 index 000000000000..633d42ef3e19 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbd.html @@ -0,0 +1,679 @@ + + + + + + + + + + + + torchvision.datasets.sbd — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.sbd

+import os
+import shutil
+from .vision import VisionDataset
+from typing import Any, Callable, Optional, Tuple
+
+import numpy as np
+
+from PIL import Image
+from .utils import download_url, verify_str_arg
+from .voc import download_extract
+
+
+
[docs]class SBDataset(VisionDataset): + """`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_ + + The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset. + + .. note :: + + Please note that the train and val splits included with this dataset are different from + the splits in the PASCAL VOC dataset. In particular some "train" images might be part of + VOC2012 val. + If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`, + which excludes all val images. + + .. warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format. + + Args: + root (string): Root directory of the Semantic Boundaries Dataset + image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``. + Image set ``train_noval`` excludes VOC 2012 val images. + mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'. + In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`, + where `num_classes=20`. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. Input sample is PIL image and target is a numpy array + if `mode='boundaries'` or PIL image if `mode='segmentation'`. + """ + + url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz" + md5 = "82b4d87ceb2ed10f6038a1cba92111cb" + filename = "benchmark.tgz" + + voc_train_url = "http://home.bharathh.info/pubs/codes/SBD/train_noval.txt" + voc_split_filename = "train_noval.txt" + voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722" + + def __init__( + self, + root: str, + image_set: str = "train", + mode: str = "boundaries", + download: bool = False, + transforms: Optional[Callable] = None, + ) -> None: + + try: + from scipy.io import loadmat + self._loadmat = loadmat + except ImportError: + raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: " + "pip install scipy") + + super(SBDataset, self).__init__(root, transforms) + self.image_set = verify_str_arg(image_set, "image_set", + ("train", "val", "train_noval")) + self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries")) + self.num_classes = 20 + + sbd_root = self.root + image_dir = os.path.join(sbd_root, 'img') + mask_dir = os.path.join(sbd_root, 'cls') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset") + for f in ["cls", "img", "inst", "train.txt", "val.txt"]: + old_path = os.path.join(extracted_ds_root, f) + shutil.move(old_path, sbd_root) + download_url(self.voc_train_url, sbd_root, self.voc_split_filename, + self.voc_split_md5) + + if not os.path.isdir(sbd_root): + raise RuntimeError('Dataset not found or corrupted.' 
+ + ' You can use download=True to download it') + + split_f = os.path.join(sbd_root, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as fh: + file_names = [x.strip() for x in fh.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names] + assert (len(self.images) == len(self.masks)) + + self._get_target = self._get_segmentation_target \ + if self.mode == "segmentation" else self._get_boundaries_target + + def _get_segmentation_target(self, filepath: str) -> Image.Image: + mat = self._loadmat(filepath) + return Image.fromarray(mat['GTcls'][0]['Segmentation'][0]) + + def _get_boundaries_target(self, filepath: str) -> np.ndarray: + mat = self._loadmat(filepath) + return np.concatenate([np.expand_dims(mat['GTcls'][0]['Boundaries'][0][i][0].toarray(), axis=0) + for i in range(self.num_classes)], axis=0) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + img = Image.open(self.images[index]).convert('RGB') + target = self._get_target(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self) -> int: + return len(self.images) + + def extra_repr(self) -> str: + lines = ["Image set: {image_set}", "Mode: {mode}"] + return '\n'.join(lines).format(**self.__dict__)
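A minimal usage sketch for SBDataset; the root path is an illustrative assumption, and scipy must be installed as noted above:

# Hypothetical example: boundary targets for the train_noval split.
from torchvision.datasets import SBDataset

sbd = SBDataset(
    root="./sbd",
    image_set="train_noval",   # excludes VOC 2012 val images
    mode="boundaries",         # target is a numpy array of shape [20, H, W]
    download=True,
)
img, target = sbd[0]           # PIL image and its boundary array
print(len(sbd), target.shape)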
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbu.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbu.html new file mode 100644 index 000000000000..7e05361aea97 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/sbu.html @@ -0,0 +1,666 @@ + + + + + + + + + + + + torchvision.datasets.sbu — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.sbu

+from PIL import Image
+from .utils import download_url, check_integrity
+from typing import Any, Callable, Optional, Tuple
+
+import os
+from .vision import VisionDataset
+
+
+
[docs]class SBU(VisionDataset): + """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset. + + Args: + root (string): Root directory of dataset where tarball + ``SBUCaptionedPhotoDataset.tar.gz`` exists. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz" + filename = "SBUCaptionedPhotoDataset.tar.gz" + md5_checksum = '9aec147b3488753cf758b4d493422285' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = True, + ) -> None: + super(SBU, self).__init__(root, transform=transform, + target_transform=target_transform) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # Read the caption for each photo + self.photos = [] + self.captions = [] + + file1 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt') + file2 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_captions.txt') + + for line1, line2 in zip(open(file1), open(file2)): + url = line1.rstrip() + photo = os.path.basename(url) + filename = os.path.join(self.root, 'dataset', photo) + if os.path.exists(filename): + caption = line2.rstrip() + self.photos.append(photo) + self.captions.append(caption) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a caption for the photo. + """ + filename = os.path.join(self.root, 'dataset', self.photos[index]) + img = Image.open(filename).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + target = self.captions[index] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + """The number of photos in the dataset.""" + return len(self.photos) + + def _check_integrity(self) -> bool: + """Check the md5 checksum of the downloaded tarball.""" + root = self.root + fpath = os.path.join(root, self.filename) + if not check_integrity(fpath, self.md5_checksum): + return False + return True + + def download(self) -> None: + """Download and extract the tarball, and download each individual photo.""" + import tarfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + download_url(self.url, self.root, self.filename, self.md5_checksum) + + # Extract file + with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar: + tar.extractall(path=self.root) + + # Download individual photos + with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh: + for line in fh: + url = line.rstrip() + try: + download_url(url, os.path.join(self.root, 'dataset')) + except OSError: + # The images point to public images on Flickr. + # Note: Images might be removed by users at anytime. + pass
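A minimal usage sketch for SBU; the root path is illustrative. Note that ``download=True`` fetches the tarball and then each photo individually from Flickr, so some entries may be skipped:

from torchvision.datasets import SBU

sbu = SBU(root="./sbu", download=True)
img, caption = sbu[0]          # PIL image and its caption string
print(len(sbu), caption)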
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/stl10.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/stl10.html new file mode 100644 index 000000000000..22c67e9be9b3 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/stl10.html @@ -0,0 +1,738 @@ + + + + + + + + + + + + torchvision.datasets.stl10 — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.stl10

+from PIL import Image
+import os
+import os.path
+import numpy as np
+from typing import Any, Callable, Optional, Tuple
+
+from .vision import VisionDataset
+from .utils import check_integrity, download_and_extract_archive, verify_str_arg
+
+
+
[docs]class STL10(VisionDataset): + """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``stl10_binary`` exists. + split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. + Accordingly dataset is selected. + folds (int, optional): One of {0-9} or None. + For training, loads one of the 10 pre-defined folds of 1k samples for the + standard evaluation procedure. If no value is passed, loads the 5k samples. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + base_folder = 'stl10_binary' + url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz" + filename = "stl10_binary.tar.gz" + tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb' + class_names_file = 'class_names.txt' + folds_list_file = 'fold_indices.txt' + train_list = [ + ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], + ['train_y.bin', '5a34089d4802c674881badbb80307741'], + ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4'] + ] + + test_list = [ + ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'], + ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e'] + ] + splits = ('train', 'train+unlabeled', 'unlabeled', 'test') + + def __init__( + self, + root: str, + split: str = "train", + folds: Optional[int] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(STL10, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", self.splits) + self.folds = self._verify_folds(folds) + + if download: + self.download() + elif not self._check_integrity(): + raise RuntimeError( + 'Dataset not found or corrupted. 
' + 'You can use download=True to download it') + + # now load the picked numpy arrays + self.labels: np.ndarray + if self.split == 'train': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + + elif self.split == 'train+unlabeled': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) + self.data = np.concatenate((self.data, unlabeled_data)) + self.labels = np.concatenate( + (self.labels, np.asarray([-1] * unlabeled_data.shape[0]))) + + elif self.split == 'unlabeled': + self.data, _ = self.__loadfile(self.train_list[2][0]) + self.labels = np.asarray([-1] * self.data.shape[0]) + else: # self.split == 'test': + self.data, self.labels = self.__loadfile( + self.test_list[0][0], self.test_list[1][0]) + + class_file = os.path.join( + self.root, self.base_folder, self.class_names_file) + if os.path.isfile(class_file): + with open(class_file) as f: + self.classes = f.read().splitlines() + + def _verify_folds(self, folds: Optional[int]) -> Optional[int]: + if folds is None: + return folds + elif isinstance(folds, int): + if folds in range(10): + return folds + msg = ("Value for argument folds should be in the range [0, 10), " + "but got {}.") + raise ValueError(msg.format(folds)) + else: + msg = "Expected type None or int for argument folds, but got type {}." + raise ValueError(msg.format(type(folds))) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + target: Optional[int] + if self.labels is not None: + img, target = self.data[index], int(self.labels[index]) + else: + img, target = self.data[index], None + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return self.data.shape[0] + + def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + labels = None + if labels_file: + path_to_labels = os.path.join( + self.root, self.base_folder, labels_file) + with open(path_to_labels, 'rb') as f: + labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based + + path_to_data = os.path.join(self.root, self.base_folder, data_file) + with open(path_to_data, 'rb') as f: + # read whole file in uint8 chunks + everything = np.fromfile(f, dtype=np.uint8) + images = np.reshape(everything, (-1, 3, 96, 96)) + images = np.transpose(images, (0, 1, 3, 2)) + + return images, labels + + def _check_integrity(self) -> bool: + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + self._check_integrity() + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + def __load_folds(self, folds: Optional[int]) -> None: + # loads one of the folds if specified + if folds is None: + return + path_to_folds = os.path.join( + self.root, self.base_folder, self.folds_list_file) + with open(path_to_folds, 'r') as f: + str_idx = f.read().splitlines()[folds] + list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ') + self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]
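A minimal usage sketch for STL10; the root path is illustrative:

from torchvision import transforms
from torchvision.datasets import STL10

train_set = STL10(
    root="./data",
    split="train+unlabeled",         # unlabeled samples are given the label -1
    transform=transforms.ToTensor(),
    download=True,
)
img, label = train_set[0]            # 3x96x96 tensor and an int label (-1 if unlabeled)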
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/svhn.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/svhn.html new file mode 100644 index 000000000000..1570ba75c920 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/svhn.html @@ -0,0 +1,673 @@ + + + + + + + + + + + + torchvision.datasets.svhn — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.svhn

+from .vision import VisionDataset
+from PIL import Image
+import os
+import os.path
+import numpy as np
+from typing import Any, Callable, Optional, Tuple
+from .utils import download_url, check_integrity, verify_str_arg
+
+
+
[docs]class SVHN(VisionDataset): + """`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset. + Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, + we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which + expect the class labels to be in the range `[0, C-1]` + + .. warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format. + + Args: + root (string): Root directory of dataset where directory + ``SVHN`` exists. + split (string): One of {'train', 'test', 'extra'}. + Accordingly dataset is selected. 'extra' is Extra training set. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + split_list = { + 'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat", + "train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"], + 'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat", + "test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"], + 'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat", + "extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]} + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(SVHN, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", tuple(self.split_list.keys())) + self.url = self.split_list[split][0] + self.filename = self.split_list[split][1] + self.file_md5 = self.split_list[split][2] + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # import here rather than at top of file because this is + # an optional dependency for torchvision + import scipy.io as sio + + # reading(loading) mat file as array + loaded_mat = sio.loadmat(os.path.join(self.root, self.filename)) + + self.data = loaded_mat['X'] + # loading from the .mat file gives an np array of type np.uint8 + # converting to np.int64, so that we have a LongTensor after + # the conversion from the numpy array + # the squeeze is needed to obtain a 1D tensor + self.labels = loaded_mat['y'].astype(np.int64).squeeze() + + # the svhn dataset assigns the class label "10" to the digit 0 + # this makes it inconsistent with several loss functions + # which expect the class labels to be in the range [0, C-1] + np.place(self.labels, self.labels == 10, 0) + self.data = np.transpose(self.data, (3, 2, 0, 1)) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.labels[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + md5 = self.split_list[self.split][2] + fpath = os.path.join(root, self.filename) + return check_integrity(fpath, md5) + + def download(self) -> None: + md5 = self.split_list[self.split][2] + download_url(self.url, self.root, self.filename, md5) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__)
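A minimal usage sketch for SVHN; the root path is illustrative, and scipy is required to read the ``.mat`` files:

from torchvision import transforms
from torchvision.datasets import SVHN

svhn = SVHN(root="./svhn", split="train", transform=transforms.ToTensor(), download=True)
img, label = svhn[0]                 # 3x32x32 tensor and a digit label in [0, 9]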
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/ucf101.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/ucf101.html new file mode 100644 index 000000000000..5f3c274b99c1 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/ucf101.html @@ -0,0 +1,659 @@ + + + + + + + + + + + + torchvision.datasets.ucf101 — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.ucf101

+import glob
+import os
+
+from .utils import list_dir
+from .folder import make_dataset
+from .video_utils import VideoClips
+from .vision import VisionDataset
+
+
+
[docs]class UCF101(VisionDataset): + """ + `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset. + + UCF101 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the UCF101 Dataset. + annotation_path (str): path to the folder containing the split files + frames_per_clip (int): number of frames in a clip. + step_between_clips (int, optional): number of frames between each clip. + fold (int, optional): which fold to use. Should be between 1 and 3. + train (bool, optional): if ``True``, creates a dataset from the train split, + otherwise from the ``test`` split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. + + Returns: + video (Tensor[T, H, W, C]): the `T` video frames + audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + label (int): class of the video clip + """ + + def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1, + frame_rate=None, fold=1, train=True, transform=None, + _precomputed_metadata=None, num_workers=1, _video_width=0, + _video_height=0, _video_min_dimension=0, _audio_samples=0): + super(UCF101, self).__init__(root) + if not 1 <= fold <= 3: + raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + + extensions = ('avi',) + self.fold = fold + self.train = train + + classes = list(sorted(list_dir(root))) + class_to_idx = {classes[i]: i for i in range(len(classes))} + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + self.classes = classes + video_list = [x[0] for x in self.samples] + video_clips = VideoClips( + video_list, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + ) + self.video_clips_metadata = video_clips.metadata + self.indices = self._select_fold(video_list, annotation_path, fold, train) + self.video_clips = video_clips.subset(self.indices) + self.transform = transform + + @property + def metadata(self): + return self.video_clips_metadata + + def _select_fold(self, video_list, annotation_path, fold, train): + name = "train" if train else "test" + name = "{}list{:02d}.txt".format(name, fold) + f = os.path.join(annotation_path, name) + selected_files = [] + with open(f, "r") as fid: + data = fid.readlines() + data = [x.strip().split(" ") for x in data] + data = [os.path.join(self.root, x[0]) for x in data] + selected_files.extend(data) + selected_files = set(selected_files) + indices = [i for i in range(len(video_list)) if video_list[i] in selected_files] + return indices + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, 
audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[self.indices[video_idx]][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label
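A minimal usage sketch for UCF101; the dataset does not download itself, so the paths below are illustrative assumptions about where the extracted videos and the official split files live, and indexing the clips requires a video backend such as PyAV:

from torchvision.datasets import UCF101

ucf = UCF101(
    root="./UCF-101",                      # one sub-folder of .avi files per action class
    annotation_path="./ucfTrainTestlist",  # contains e.g. trainlist01.txt / testlist01.txt
    frames_per_clip=16,
    step_between_clips=16,
    fold=1,
    train=True,
)
video, audio, label = ucf[0]               # Tensor[T, H, W, C], audio frames, class index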
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/usps.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/usps.html new file mode 100644 index 000000000000..aaafe652ae60 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/usps.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + torchvision.datasets.usps — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.usps

+from PIL import Image
+import os
+import numpy as np
+from typing import Any, Callable, cast, Optional, Tuple
+
+from .utils import download_url
+from .vision import VisionDataset
+
+
+
[docs]class USPS(VisionDataset): + """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset. + The data-format is : [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``. + The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]`` + and make pixel values in ``[0, 255]``. + + Args: + root (string): Root directory of dataset to store``USPS`` data files. + train (bool, optional): If True, creates dataset from ``usps.bz2``, + otherwise from ``usps.t.bz2``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + split_list = { + 'train': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2", + "usps.bz2", 'ec16c51db3855ca6c91edd34d0e9b197' + ], + 'test': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2", + "usps.t.bz2", '8ea070ee2aca1ac39742fdd1ef5ed118' + ], + } + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(USPS, self).__init__(root, transform=transform, + target_transform=target_transform) + split = 'train' if train else 'test' + url, filename, checksum = self.split_list[split] + full_path = os.path.join(self.root, filename) + + if download and not os.path.exists(full_path): + download_url(url, self.root, filename, md5=checksum) + + import bz2 + with bz2.open(full_path) as fp: + raw_data = [line.decode().split() for line in fp.readlines()] + imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data] + imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16)) + imgs = ((cast(np.ndarray, imgs) + 1) / 2 * 255).astype(dtype=np.uint8) + targets = [int(d[0]) - 1 for d in raw_data] + + self.data = imgs + self.targets = targets + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img, mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
+ + def __len__(self) -> int: + return len(self.data)
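A minimal usage sketch for USPS; the root path is illustrative:

from torchvision import transforms
from torchvision.datasets import USPS

usps = USPS(root="./usps", train=True, transform=transforms.ToTensor(), download=True)
img, label = usps[0]                 # 1x16x16 tensor and a digit label in [0, 9]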
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/datasets/voc.html b/docs/1.7.0/torchvision/_modules/torchvision/datasets/voc.html new file mode 100644 index 000000000000..c067350e457c --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/datasets/voc.html @@ -0,0 +1,799 @@ + + + + + + + + + + + + torchvision.datasets.voc — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.datasets.voc

+import os
+import tarfile
+import collections
+from .vision import VisionDataset
+import xml.etree.ElementTree as ET
+from PIL import Image
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from .utils import download_url, check_integrity, verify_str_arg
+
+DATASET_YEAR_DICT = {
+    '2012': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
+        'filename': 'VOCtrainval_11-May-2012.tar',
+        'md5': '6cd6e144f989b92b3379bac3b3de84fd',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2012')
+    },
+    '2011': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
+        'filename': 'VOCtrainval_25-May-2011.tar',
+        'md5': '6c3384ef61512963050cb5d687e5bf1e',
+        'base_dir': os.path.join('TrainVal', 'VOCdevkit', 'VOC2011')
+    },
+    '2010': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
+        'filename': 'VOCtrainval_03-May-2010.tar',
+        'md5': 'da459979d0c395079b5c75ee67908abb',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2010')
+    },
+    '2009': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
+        'filename': 'VOCtrainval_11-May-2009.tar',
+        'md5': '59065e4b188729180974ef6572f6a212',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2009')
+    },
+    '2008': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
+        'filename': 'VOCtrainval_14-Jul-2008.tar',
+        'md5': '2629fa636546599198acfcfbfcf1904a',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2008')
+    },
+    '2007': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
+        'filename': 'VOCtrainval_06-Nov-2007.tar',
+        'md5': 'c52e279531787c972589f7e41ab4ae64',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2007')
+    },
+    '2007-test': {
+        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
+        'filename': 'VOCtest_06-Nov-2007.tar',
+        'md5': 'b6e924de25625d8de591ea690078ad9f',
+        'base_dir': os.path.join('VOCdevkit', 'VOC2007')
+    }
+}
+
+
+
[docs]class VOCSegmentation(VisionDataset): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years 2007 to 2012. + image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val`` + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__( + self, + root: str, + year: str = "2012", + image_set: str = "train", + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ): + super(VOCSegmentation, self).__init__(root, transforms, transform, target_transform) + self.year = year + if year == "2007" and image_set == "test": + year = "2007-test" + self.url = DATASET_YEAR_DICT[year]['url'] + self.filename = DATASET_YEAR_DICT[year]['filename'] + self.md5 = DATASET_YEAR_DICT[year]['md5'] + valid_sets = ["train", "trainval", "val"] + if year == "2007-test": + valid_sets.append("test") + self.image_set = verify_str_arg(image_set, "image_set", valid_sets) + base_dir = DATASET_YEAR_DICT[year]['base_dir'] + voc_root = os.path.join(self.root, base_dir) + image_dir = os.path.join(voc_root, 'JPEGImages') + mask_dir = os.path.join(voc_root, 'SegmentationClass') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation') + + split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names] + assert (len(self.images) == len(self.masks)) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is the image segmentation. + """ + img = Image.open(self.images[index]).convert('RGB') + target = Image.open(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
+ + def __len__(self) -> int: + return len(self.images)
+ + +
[docs]class VOCDetection(VisionDataset): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years 2007 to 2012. + image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val`` + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + (default: alphabetic indexing of VOC's 20 classes). + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, required): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__( + self, + root: str, + year: str = "2012", + image_set: str = "train", + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ): + super(VOCDetection, self).__init__(root, transforms, transform, target_transform) + self.year = year + if year == "2007" and image_set == "test": + year = "2007-test" + self.url = DATASET_YEAR_DICT[year]['url'] + self.filename = DATASET_YEAR_DICT[year]['filename'] + self.md5 = DATASET_YEAR_DICT[year]['md5'] + valid_sets = ["train", "trainval", "val"] + if year == "2007-test": + valid_sets.append("test") + self.image_set = verify_str_arg(image_set, "image_set", valid_sets) + + base_dir = DATASET_YEAR_DICT[year]['base_dir'] + voc_root = os.path.join(self.root, base_dir) + image_dir = os.path.join(voc_root, 'JPEGImages') + annotation_dir = os.path.join(voc_root, 'Annotations') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + splits_dir = os.path.join(voc_root, 'ImageSets/Main') + + split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.annotations = [os.path.join(annotation_dir, x + ".xml") for x in file_names] + assert (len(self.images) == len(self.annotations)) + +
[docs] def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dictionary of the XML tree. + """ + img = Image.open(self.images[index]).convert('RGB') + target = self.parse_voc_xml( + ET.parse(self.annotations[index]).getroot()) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
+ + def __len__(self) -> int: + return len(self.images) + + def parse_voc_xml(self, node: ET.Element) -> Dict[str, Any]: + voc_dict: Dict[str, Any] = {} + children = list(node) + if children: + def_dic: Dict[str, Any] = collections.defaultdict(list) + for dc in map(self.parse_voc_xml, children): + for ind, v in dc.items(): + def_dic[ind].append(v) + if node.tag == 'annotation': + def_dic['object'] = [def_dic['object']] + voc_dict = { + node.tag: + {ind: v[0] if len(v) == 1 else v + for ind, v in def_dic.items()} + } + if node.text: + text = node.text.strip() + if not children: + voc_dict[node.tag] = text + return voc_dict
+ + +def download_extract(url: str, root: str, filename: str, md5: str) -> None: + download_url(url, root, filename, md5) + with tarfile.open(os.path.join(root, filename), "r") as tar: + tar.extractall(path=root) +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/io/image.html b/docs/1.7.0/torchvision/_modules/torchvision/io/image.html new file mode 100644 index 000000000000..039fb8b6ebd0 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/io/image.html @@ -0,0 +1,758 @@ + + + + + + + + + + + + torchvision.io.image — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.io.image

+import torch
+
+import os
+import os.path as osp
+import importlib.machinery
+
+_HAS_IMAGE_OPT = False
+
+try:
+    lib_dir = osp.join(osp.dirname(__file__), "..")
+
+    loader_details = (
+        importlib.machinery.ExtensionFileLoader,
+        importlib.machinery.EXTENSION_SUFFIXES
+    )
+
+    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)  # type: ignore[arg-type]
+    ext_specs = extfinder.find_spec("image")
+    if ext_specs is not None:
+        torch.ops.load_library(ext_specs.origin)
+        _HAS_IMAGE_OPT = True
+except (ImportError, OSError):
+    pass
+
+
+def read_file(path: str) -> torch.Tensor:
+    """
+    Reads the byte contents of a file and returns them as a uint8 Tensor
+    with one dimension.
+
+    Arguments:
+        path (str): the path to the file to be read
+
+    Returns:
+        data (Tensor)
+    """
+    data = torch.ops.image.read_file(path)
+    return data
+
+
+def write_file(filename: str, data: torch.Tensor) -> None:
+    """
+    Writes the contents of a uint8 tensor with one dimension to a
+    file.
+
+    Arguments:
+        filename (str): the path to the file to be written
+        data (Tensor): the contents to be written to the output file
+    """
+    torch.ops.image.write_file(filename, data)
+
+
+def decode_png(input: torch.Tensor) -> torch.Tensor:
+    """
+    Decodes a PNG image into a 3 dimensional RGB Tensor.
+    The values of the output tensor are uint8 between 0 and 255.
+
+    Arguments:
+        input (Tensor[1]): a one dimensional uint8 tensor containing
+            the raw bytes of the PNG image.
+
+    Returns:
+        output (Tensor[3, image_height, image_width])
+    """
+    output = torch.ops.image.decode_png(input)
+    return output
+
+
+
[docs]def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: + """ + Takes an input tensor in CHW layout and returns a buffer with the contents + of its corresponding PNG file. + + Parameters + ---------- + input: Tensor[channels, image_height, image_width] + int8 image tensor of `c` channels, where `c` must be 3 or 1. + compression_level: int + Compression factor for the resulting file, it must be a number + between 0 and 9. Default: 6 + + Returns + ------- + output: Tensor[1] + A one dimensional int8 tensor that contains the raw bytes of the + PNG file. + """ + output = torch.ops.image.encode_png(input, compression_level) + return output
+ + +
[docs]def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): + """ + Takes an input tensor in CHW layout (or HW in the case of grayscale images) + and saves it in a PNG file. + + Parameters + ---------- + input: Tensor[channels, image_height, image_width] + int8 image tensor of `c` channels, where `c` must be 1 or 3. + filename: str + Path to save the image. + compression_level: int + Compression factor for the resulting file, it must be a number + between 0 and 9. Default: 6 + """ + output = encode_png(input, compression_level) + write_file(filename, output)
+ + +def decode_jpeg(input: torch.Tensor) -> torch.Tensor: + """ + Decodes a JPEG image into a 3 dimensional RGB Tensor. + The values of the output tensor are uint8 between 0 and 255. + Arguments: + input (Tensor[1]): a one dimensional uint8 tensor containing + the raw bytes of the JPEG image. + Returns: + output (Tensor[3, image_height, image_width]) + """ + output = torch.ops.image.decode_jpeg(input) + return output + + +
[docs]def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: + """ + Takes an input tensor in CHW layout and returns a buffer with the contents + of its corresponding JPEG file. + + Parameters + ---------- + input: Tensor[channels, image_height, image_width]) + int8 image tensor of `c` channels, where `c` must be 1 or 3. + quality: int + Quality of the resulting JPEG file, it must be a number between + 1 and 100. Default: 75 + + Returns + ------- + output: Tensor[1] + A one dimensional int8 tensor that contains the raw bytes of the + JPEG file. + """ + if quality < 1 or quality > 100: + raise ValueError('Image quality should be a positive number ' + 'between 1 and 100') + + output = torch.ops.image.encode_jpeg(input, quality) + return output
+ + +
[docs]def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): + """ + Takes an input tensor in CHW layout and saves it in a JPEG file. + + Parameters + ---------- + input: Tensor[channels, image_height, image_width] + int8 image tensor of `c` channels, where `c` must be 1 or 3. + filename: str + Path to save the image. + quality: int + Quality of the resulting JPEG file, it must be a number + between 1 and 100. Default: 75 + """ + output = encode_jpeg(input, quality) + write_file(filename, output)
+ + +
[docs]def decode_image(input: torch.Tensor) -> torch.Tensor: + """ + Detects whether an image is a JPEG or PNG and performs the appropriate + operation to decode the image into a 3 dimensional RGB Tensor. + + The values of the output tensor are uint8 between 0 and 255. + + Parameters + ---------- + input: Tensor + a one dimensional uint8 tensor containing the raw bytes of the + PNG or JPEG image. + + Returns + ------- + output: Tensor[3, image_height, image_width] + """ + output = torch.ops.image.decode_image(input) + return output
+ + +
[docs]def read_image(path: str) -> torch.Tensor: + """ + Reads a JPEG or PNG image into a 3 dimensional RGB Tensor. + The values of the output tensor are uint8 between 0 and 255. + + Parameters + ---------- + path: str + path of the JPEG or PNG image. + + Returns + ------- + output: Tensor[3, image_height, image_width] + """ + data = read_file(path) + return decode_image(data)
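A minimal sketch of the image I/O round trip built from the functions above; the file names are illustrative, and it assumes torchvision was built with the image extension:

from torchvision.io.image import read_image, encode_jpeg, write_jpeg, write_png

img = read_image("input.jpg")            # uint8 tensor of shape [3, H, W]
write_png(img, "copy.png")               # re-encode as PNG (compression_level defaults to 6)
write_jpeg(img, "copy.jpg", quality=90)  # re-encode as JPEG
buf = encode_jpeg(img, quality=75)       # one-dimensional tensor with the raw JPEG bytes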
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/io/video.html b/docs/1.7.0/torchvision/_modules/torchvision/io/video.html new file mode 100644 index 000000000000..69df19aa65dc --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/io/video.html @@ -0,0 +1,916 @@ + + + + + + + + + + + + torchvision.io.video — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchvision.io.video

+import gc
+import math
+import re
+import warnings
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from . import _video_opt
+from ._video_opt import VideoMetaData
+
+
+try:
+    import av
+
+    av.logging.set_level(av.logging.ERROR)
+    if not hasattr(av.video.frame.VideoFrame, "pict_type"):
+        av = ImportError(
+            """\
+Your version of PyAV is too old for the necessary video operations in torchvision.
+If you are on Python 3.5, you will have to build from source (the conda-forge
+packages are not up-to-date).  See
+https://github.com/mikeboers/PyAV#installation for instructions on how to
+install PyAV on your system.
+"""
+        )
+except ImportError:
+    av = ImportError(
+        """\
+PyAV is not installed, and is necessary for the video operations in torchvision.
+See https://github.com/mikeboers/PyAV#installation for instructions on how to
+install PyAV on your system.
+"""
+    )
+
+
+def _check_av_available() -> None:
+    if isinstance(av, Exception):
+        raise av
+
+
+def _av_available() -> bool:
+    return not isinstance(av, Exception)
+
+
+# PyAV has some reference cycles
+_CALLED_TIMES = 0
+_GC_COLLECTION_INTERVAL = 10
+
+
+
[docs]def write_video( + filename: str, + video_array: torch.Tensor, + fps: float, + video_codec: str = "libx264", + options: Optional[Dict[str, Any]] = None, +) -> None: + """ + Writes a 4d tensor in [T, H, W, C] format in a video file + + Parameters + ---------- + filename : str + path where the video will be saved + video_array : Tensor[T, H, W, C] + tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format + fps : Number + frames per second + """ + _check_av_available() + video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy() + + # PyAV does not support floating point numbers with decimal point + # and will throw OverflowException in case this is not the case + if isinstance(fps, float): + fps = np.round(fps) + + with av.open(filename, mode="w") as container: + stream = container.add_stream(video_codec, rate=fps) + stream.width = video_array.shape[2] + stream.height = video_array.shape[1] + stream.pix_fmt = "yuv420p" if video_codec != "libx264rgb" else "rgb24" + stream.options = options or {} + + for img in video_array: + frame = av.VideoFrame.from_ndarray(img, format="rgb24") + frame.pict_type = "NONE" + for packet in stream.encode(frame): + container.mux(packet) + + # Flush stream + for packet in stream.encode(): + container.mux(packet)
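A minimal sketch of write_video; it requires PyAV, and the file name and frame contents are illustrative:

import torch
from torchvision.io.video import write_video

frames = torch.randint(0, 256, (30, 128, 256, 3), dtype=torch.uint8)  # [T, H, W, C]
write_video("random.mp4", frames, fps=24)   # encoded with the default libx264 codec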
+ + +def _read_from_stream( + container: "av.container.Container", + start_offset: float, + end_offset: float, + pts_unit: str, + stream: "av.stream.Stream", + stream_name: Dict[str, Optional[Union[int, Tuple[int, ...], List[int]]]], +) -> List["av.frame.Frame"]: + global _CALLED_TIMES, _GC_COLLECTION_INTERVAL + _CALLED_TIMES += 1 + if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1: + gc.collect() + + if pts_unit == "sec": + start_offset = int(math.floor(start_offset * (1 / stream.time_base))) + if end_offset != float("inf"): + end_offset = int(math.ceil(end_offset * (1 / stream.time_base))) + else: + warnings.warn( + "The pts_unit 'pts' gives wrong results and will be removed in a " + + "follow-up version. Please use pts_unit 'sec'." + ) + + frames = {} + should_buffer = True + max_buffer_size = 5 + if stream.type == "video": + # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt) + # so need to buffer some extra frames to sort everything + # properly + extradata = stream.codec_context.extradata + # overly complicated way of finding if `divx_packed` is set, following + # https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263 + if extradata and b"DivX" in extradata: + # can't use regex directly because of some weird characters sometimes... + pos = extradata.find(b"DivX") + d = extradata[pos:] + o = re.search(br"DivX(\d+)Build(\d+)(\w)", d) + if o is None: + o = re.search(br"DivX(\d+)b(\d+)(\w)", d) + if o is not None: + should_buffer = o.group(3) == b"p" + seek_offset = start_offset + # some files don't seek to the right location, so better be safe here + seek_offset = max(seek_offset - 1, 0) + if should_buffer: + # FIXME this is kind of a hack, but we will jump to the previous keyframe + # so this will be safe + seek_offset = max(seek_offset - max_buffer_size, 0) + try: + # TODO check if stream needs to always be the video stream here or not + container.seek(seek_offset, any_frame=False, backward=True, stream=stream) + except av.AVError: + # TODO add some warnings in this case + # print("Corrupted file?", container.name) + return [] + buffer_count = 0 + try: + for _idx, frame in enumerate(container.decode(**stream_name)): + frames[frame.pts] = frame + if frame.pts >= end_offset: + if should_buffer and buffer_count < max_buffer_size: + buffer_count += 1 + continue + break + except av.AVError: + # TODO add a warning + pass + # ensure that the results are sorted wrt the pts + result = [ + frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset + ] + if len(frames) > 0 and start_offset > 0 and start_offset not in frames: + # if there is no frame that exactly matches the pts of start_offset + # add the last frame smaller than start_offset, to guarantee that + # we will have all the necessary data. 
This is most useful for audio + preceding_frames = [i for i in frames if i < start_offset] + if len(preceding_frames) > 0: + first_frame_pts = max(preceding_frames) + result.insert(0, frames[first_frame_pts]) + return result + + +def _align_audio_frames( + aframes: torch.Tensor, audio_frames: List["av.frame.Frame"], ref_start: int, ref_end: float +) -> torch.Tensor: + start, end = audio_frames[0].pts, audio_frames[-1].pts + total_aframes = aframes.shape[1] + step_per_aframe = (end - start + 1) / total_aframes + s_idx = 0 + e_idx = total_aframes + if start < ref_start: + s_idx = int((ref_start - start) / step_per_aframe) + if end > ref_end: + e_idx = int((ref_end - end) / step_per_aframe) + return aframes[:, s_idx:e_idx] + + +
[docs]def read_video( + filename: str, start_pts: int = 0, end_pts: Optional[float] = None, pts_unit: str = "pts" +) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]: + """ + Reads a video from a file, returning both the video frames as well as + the audio frames + + Parameters + ---------- + filename : str + path to the video file + start_pts : int if pts_unit = 'pts', optional + float / Fraction if pts_unit = 'sec', optional + the start presentation time of the video + end_pts : int if pts_unit = 'pts', optional + float / Fraction if pts_unit = 'sec', optional + the end presentation time + pts_unit : str, optional + unit in which start_pts and end_pts values will be interpreted, either 'pts' or 'sec'. Defaults to 'pts'. + + Returns + ------- + vframes : Tensor[T, H, W, C] + the `T` video frames + aframes : Tensor[K, L] + the audio frames, where `K` is the number of channels and `L` is the + number of points + info : Dict + metadata for the video and audio. Can contain the fields video_fps (float) + and audio_fps (int) + """ + + from torchvision import get_video_backend + + if get_video_backend() != "pyav": + return _video_opt._read_video(filename, start_pts, end_pts, pts_unit) + + _check_av_available() + + if end_pts is None: + end_pts = float("inf") + + if end_pts < start_pts: + raise ValueError( + "end_pts should be larger than start_pts, got " + "start_pts={} and end_pts={}".format(start_pts, end_pts) + ) + + info = {} + video_frames = [] + audio_frames = [] + + try: + with av.open(filename, metadata_errors="ignore") as container: + if container.streams.video: + video_frames = _read_from_stream( + container, + start_pts, + end_pts, + pts_unit, + container.streams.video[0], + {"video": 0}, + ) + video_fps = container.streams.video[0].average_rate + # guard against potentially corrupted files + if video_fps is not None: + info["video_fps"] = float(video_fps) + + if container.streams.audio: + audio_frames = _read_from_stream( + container, + start_pts, + end_pts, + pts_unit, + container.streams.audio[0], + {"audio": 0}, + ) + info["audio_fps"] = container.streams.audio[0].rate + + except av.AVError: + # TODO raise a warning? + pass + + vframes_list = [frame.to_rgb().to_ndarray() for frame in video_frames] + aframes_list = [frame.to_ndarray() for frame in audio_frames] + + if vframes_list: + vframes = torch.as_tensor(np.stack(vframes_list)) + else: + vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8) + + if aframes_list: + aframes = np.concatenate(aframes_list, 1) + aframes = torch.as_tensor(aframes) + aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts) + else: + aframes = torch.empty((1, 0), dtype=torch.float32) + + return vframes, aframes, info
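A companion sketch for read_video (the path video.mp4 is a placeholder; pts_unit="sec" is used because the 'pts' default is slated for removal):

    import torchvision

    # decode the first two seconds of video and audio
    vframes, aframes, info = torchvision.io.read_video(
        "video.mp4", start_pts=0, end_pts=2.0, pts_unit="sec")

    print(vframes.shape)            # [T, H, W, C], uint8 video frames
    print(aframes.shape)            # [K, L] audio samples
    print(info.get("video_fps"))    # e.g. 29.97, if the container reports it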
+ + +def _can_read_timestamps_from_packets(container: "av.container.Container") -> bool: + extradata = container.streams[0].codec_context.extradata + if extradata is None: + return False + if b"Lavc" in extradata: + return True + return False + + +def _decode_video_timestamps(container: "av.container.Container") -> List[int]: + if _can_read_timestamps_from_packets(container): + # fast path + return [x.pts for x in container.demux(video=0) if x.pts is not None] + else: + return [x.pts for x in container.decode(video=0) if x.pts is not None] + + +
[docs]def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[int], Optional[float]]: + """ + List the video frames timestamps. + + Note that the function decodes the whole video frame-by-frame. + + Parameters + ---------- + filename : str + path to the video file + pts_unit : str, optional + unit in which timestamp values will be returned either 'pts' or 'sec'. Defaults to 'pts'. + + Returns + ------- + pts : List[int] if pts_unit = 'pts' + List[Fraction] if pts_unit = 'sec' + presentation timestamps for each one of the frames in the video. + video_fps : float, optional + the frame rate for the video + + """ + from torchvision import get_video_backend + + if get_video_backend() != "pyav": + return _video_opt._read_video_timestamps(filename, pts_unit) + + _check_av_available() + + video_fps = None + pts = [] + + try: + with av.open(filename, metadata_errors="ignore") as container: + if container.streams.video: + video_stream = container.streams.video[0] + video_time_base = video_stream.time_base + try: + pts = _decode_video_timestamps(container) + except av.AVError: + warnings.warn(f"Failed decoding frames for file {filename}") + video_fps = float(video_stream.average_rate) + except av.AVError: + # TODO add a warning + pass + + pts.sort() + + if pts_unit == "sec": + pts = [x * video_time_base for x in pts] + + return pts, video_fps
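And a matching sketch for read_video_timestamps, again with a hypothetical local file:

    import torchvision

    pts, fps = torchvision.io.read_video_timestamps("video.mp4", pts_unit="sec")
    print(len(pts), fps)  # number of frames and the reported frame rate (may be None)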
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/alexnet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/alexnet.html
new file mode 100644
index 000000000000..1ebd38d9aa76
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/models/alexnet.html
@@ -0,0 +1,617 @@
torchvision.models.alexnet — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.models.alexnet

+import torch
+import torch.nn as nn
+from .utils import load_state_dict_from_url
+
+
+__all__ = ['AlexNet', 'alexnet']
+
+
+model_urls = {
+    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
+}
+
+
+class AlexNet(nn.Module):
+
+    def __init__(self, num_classes=1000):
+        super(AlexNet, self).__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(64, 192, kernel_size=5, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(192, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(256, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+        )
+        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
+        self.classifier = nn.Sequential(
+            nn.Dropout(),
+            nn.Linear(256 * 6 * 6, 4096),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(4096, 4096),
+            nn.ReLU(inplace=True),
+            nn.Linear(4096, num_classes),
+        )
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.classifier(x)
+        return x
+
+
+
[docs]def alexnet(pretrained=False, progress=True, **kwargs): + r"""AlexNet model architecture from the + `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = AlexNet(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['alexnet'], + progress=progress) + model.load_state_dict(state_dict) + return model
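A short inference sketch for the alexnet factory above (random input purely for illustration; the normalization constants are the standard ImageNet statistics):

    import torch
    import torchvision

    model = torchvision.models.alexnet(pretrained=True)
    model.eval()

    # AlexNet expects 3-channel images of at least 224x224, normalized with ImageNet statistics
    x = torch.rand(1, 3, 224, 224)
    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
    x = (x - mean) / std

    with torch.no_grad():
        logits = model(x)   # shape [1, 1000]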
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/densenet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/densenet.html
new file mode 100644
index 000000000000..ccb67e750e78
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/models/densenet.html
@@ -0,0 +1,831 @@
torchvision.models.densenet — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.models.densenet

+import re
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as cp
+from collections import OrderedDict
+from .utils import load_state_dict_from_url
+from torch import Tensor
+from torch.jit.annotations import List
+
+
+__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
+
+model_urls = {
+    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
+    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
+    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
+    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
+}
+
+
+class _DenseLayer(nn.Module):
+    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):
+        super(_DenseLayer, self).__init__()
+        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
+        self.add_module('relu1', nn.ReLU(inplace=True)),
+        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
+                                           growth_rate, kernel_size=1, stride=1,
+                                           bias=False)),
+        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
+        self.add_module('relu2', nn.ReLU(inplace=True)),
+        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
+                                           kernel_size=3, stride=1, padding=1,
+                                           bias=False)),
+        self.drop_rate = float(drop_rate)
+        self.memory_efficient = memory_efficient
+
+    def bn_function(self, inputs):
+        # type: (List[Tensor]) -> Tensor
+        concated_features = torch.cat(inputs, 1)
+        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))  # noqa: T484
+        return bottleneck_output
+
+    # todo: rewrite when torchscript supports any
+    def any_requires_grad(self, input):
+        # type: (List[Tensor]) -> bool
+        for tensor in input:
+            if tensor.requires_grad:
+                return True
+        return False
+
+    @torch.jit.unused  # noqa: T484
+    def call_checkpoint_bottleneck(self, input):
+        # type: (List[Tensor]) -> Tensor
+        def closure(*inputs):
+            return self.bn_function(inputs)
+
+        return cp.checkpoint(closure, *input)
+
+    @torch.jit._overload_method  # noqa: F811
+    def forward(self, input):
+        # type: (List[Tensor]) -> (Tensor)
+        pass
+
+    @torch.jit._overload_method  # noqa: F811
+    def forward(self, input):
+        # type: (Tensor) -> (Tensor)
+        pass
+
+    # torchscript does not yet support *args, so we overload method
+    # allowing it to take either a List[Tensor] or single Tensor
+    def forward(self, input):  # noqa: F811
+        if isinstance(input, Tensor):
+            prev_features = [input]
+        else:
+            prev_features = input
+
+        if self.memory_efficient and self.any_requires_grad(prev_features):
+            if torch.jit.is_scripting():
+                raise Exception("Memory Efficient not supported in JIT")
+
+            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
+        else:
+            bottleneck_output = self.bn_function(prev_features)
+
+        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
+        if self.drop_rate > 0:
+            new_features = F.dropout(new_features, p=self.drop_rate,
+                                     training=self.training)
+        return new_features
+
+
+class _DenseBlock(nn.ModuleDict):
+    _version = 2
+
+    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
+        super(_DenseBlock, self).__init__()
+        for i in range(num_layers):
+            layer = _DenseLayer(
+                num_input_features + i * growth_rate,
+                growth_rate=growth_rate,
+                bn_size=bn_size,
+                drop_rate=drop_rate,
+                memory_efficient=memory_efficient,
+            )
+            self.add_module('denselayer%d' % (i + 1), layer)
+
+    def forward(self, init_features):
+        features = [init_features]
+        for name, layer in self.items():
+            new_features = layer(features)
+            features.append(new_features)
+        return torch.cat(features, 1)
+
+
+class _Transition(nn.Sequential):
+    def __init__(self, num_input_features, num_output_features):
+        super(_Transition, self).__init__()
+        self.add_module('norm', nn.BatchNorm2d(num_input_features))
+        self.add_module('relu', nn.ReLU(inplace=True))
+        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
+                                          kernel_size=1, stride=1, bias=False))
+        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
+
+
+class DenseNet(nn.Module):
+    r"""Densenet-BC model class, based on
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+
+    Args:
+        growth_rate (int) - how many filters to add each layer (`k` in paper)
+        block_config (list of 4 ints) - how many layers in each pooling block
+        num_init_features (int) - the number of filters to learn in the first convolution layer
+        bn_size (int) - multiplicative factor for number of bottleneck layers
+          (i.e. bn_size * k features in the bottleneck layer)
+        drop_rate (float) - dropout rate after each dense layer
+        num_classes (int) - number of classification classes
+        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
+          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
+    """
+
+    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
+                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
+
+        super(DenseNet, self).__init__()
+
+        # First convolution
+        self.features = nn.Sequential(OrderedDict([
+            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
+                                padding=3, bias=False)),
+            ('norm0', nn.BatchNorm2d(num_init_features)),
+            ('relu0', nn.ReLU(inplace=True)),
+            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
+        ]))
+
+        # Each denseblock
+        num_features = num_init_features
+        for i, num_layers in enumerate(block_config):
+            block = _DenseBlock(
+                num_layers=num_layers,
+                num_input_features=num_features,
+                bn_size=bn_size,
+                growth_rate=growth_rate,
+                drop_rate=drop_rate,
+                memory_efficient=memory_efficient
+            )
+            self.features.add_module('denseblock%d' % (i + 1), block)
+            num_features = num_features + num_layers * growth_rate
+            if i != len(block_config) - 1:
+                trans = _Transition(num_input_features=num_features,
+                                    num_output_features=num_features // 2)
+                self.features.add_module('transition%d' % (i + 1), trans)
+                num_features = num_features // 2
+
+        # Final batch norm
+        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
+
+        # Linear layer
+        self.classifier = nn.Linear(num_features, num_classes)
+
+        # Official init from torch repo.
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        features = self.features(x)
+        out = F.relu(features, inplace=True)
+        out = F.adaptive_avg_pool2d(out, (1, 1))
+        out = torch.flatten(out, 1)
+        out = self.classifier(out)
+        return out
+
+
+def _load_state_dict(model, model_url, progress):
+    # '.'s are no longer allowed in module names, but previous _DenseLayer
+    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+    # They are also in the checkpoints in model_urls. This pattern is used
+    # to find such keys.
+    pattern = re.compile(
+        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+
+    state_dict = load_state_dict_from_url(model_url, progress=progress)
+    for key in list(state_dict.keys()):
+        res = pattern.match(key)
+        if res:
+            new_key = res.group(1) + res.group(2)
+            state_dict[new_key] = state_dict[key]
+            del state_dict[key]
+    model.load_state_dict(state_dict)
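To make the key renaming concrete, a small illustration of what the pattern above does to a representative legacy checkpoint key (the key string is an assumption based on the comment above):

    import re

    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')

    old_key = 'features.denseblock1.denselayer1.norm.1.weight'
    m = pattern.match(old_key)
    new_key = m.group(1) + m.group(2)
    print(new_key)  # features.denseblock1.denselayer1.norm1.weight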
+
+
+def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
+              **kwargs):
+    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
+    if pretrained:
+        _load_state_dict(model, model_urls[arch], progress)
+    return model
+
+
+
[docs]def densenet121(pretrained=False, progress=True, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ + """ + return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, + **kwargs)
+ + +
[docs]def densenet161(pretrained=False, progress=True, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ + """ + return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, + **kwargs)
+ + +
[docs]def densenet169(pretrained=False, progress=True, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ + """ + return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, + **kwargs)
+ + +
[docs]def densenet201(pretrained=False, progress=True, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ + """ + return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, + **kwargs)
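A brief usage sketch covering the DenseNet factories above (random input for illustration; memory_efficient trades extra compute for lower memory via checkpointing):

    import torch
    import torchvision

    model = torchvision.models.densenet121(pretrained=True, memory_efficient=False)
    model.eval()

    x = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        out = model(x)   # logits of shape [1, 1000]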
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/detection/faster_rcnn.html b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/faster_rcnn.html
new file mode 100644
index 000000000000..88bea7587d2a
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/faster_rcnn.html
@@ -0,0 +1,915 @@
torchvision.models.detection.faster_rcnn — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.models.detection.faster_rcnn

+from collections import OrderedDict
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from torchvision.ops import misc as misc_nn_ops
+from torchvision.ops import MultiScaleRoIAlign
+
+from ..utils import load_state_dict_from_url
+
+from .generalized_rcnn import GeneralizedRCNN
+from .rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
+from .roi_heads import RoIHeads
+from .transform import GeneralizedRCNNTransform
+from .backbone_utils import resnet_fpn_backbone
+
+
+__all__ = [
+    "FasterRCNN", "fasterrcnn_resnet50_fpn",
+]
+
+
+class FasterRCNN(GeneralizedRCNN):
+    """
+    Implements Faster R-CNN.
+
+    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
+    image, and should be in 0-1 range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
+        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the class label for each ground-truth box
+
+    The model returns a Dict[Tensor] during training, containing the classification and regression
+    losses for both the RPN and the R-CNN.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
+    follows:
+        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
+
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
+        num_classes (int): number of output classes of the model (including the background).
+            If box_predictor is specified, num_classes should be None.
+        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
+        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
+        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been trained.
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained.
+        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
+            maps.
+        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
+        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
+        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
+        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
+        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
+        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
+        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
+            considered as positive during training of the RPN.
+        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
+            considered as negative during training of the RPN.
+        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
+            for computing the loss
+        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
+            of the RPN
+        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
+            the locations indicated by the bounding boxes
+        box_head (nn.Module): module that takes the cropped feature maps as input
+        box_predictor (nn.Module): module that takes the output of box_head and returns the
+            classification logits and box regression deltas.
+        box_score_thresh (float): during inference, only return proposals with a classification score
+            greater than box_score_thresh
+        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
+        box_detections_per_img (int): maximum number of detections per image, for all classes.
+        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
+            considered as positive during training of the classification head
+        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
+            considered as negative during training of the classification head
+        box_batch_size_per_image (int): number of proposals that are sampled during training of the
+            classification head
+        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
+            of the classification head
+        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
+            bounding boxes
+
+    Example::
+
+        >>> import torch
+        >>> import torchvision
+        >>> from torchvision.models.detection import FasterRCNN
+        >>> from torchvision.models.detection.rpn import AnchorGenerator
+        >>> # load a pre-trained model for classification and return
+        >>> # only the features
+        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
+        >>> # FasterRCNN needs to know the number of
+        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
+        >>> # so we need to add it here
+        >>> backbone.out_channels = 1280
+        >>>
+        >>> # let's make the RPN generate 5 x 3 anchors per spatial
+        >>> # location, with 5 different sizes and 3 different aspect
+        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
+        >>> # map could potentially have different sizes and
+        >>> # aspect ratios
+        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
+        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
+        >>>
+        >>> # let's define what are the feature maps that we will
+        >>> # use to perform the region of interest cropping, as well as
+        >>> # the size of the crop after rescaling.
+        >>> # if your backbone returns a Tensor, featmap_names is expected to
+        >>> # be ['0']. More generally, the backbone should return an
+        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
+        >>> # feature maps to use.
+        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
+        >>>                                                 output_size=7,
+        >>>                                                 sampling_ratio=2)
+        >>>
+        >>> # put the pieces together inside a FasterRCNN model
+        >>> model = FasterRCNN(backbone,
+        >>>                    num_classes=2,
+        >>>                    rpn_anchor_generator=anchor_generator,
+        >>>                    box_roi_pool=roi_pooler)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+    """
+
+    def __init__(self, backbone, num_classes=None,
+                 # transform parameters
+                 min_size=800, max_size=1333,
+                 image_mean=None, image_std=None,
+                 # RPN parameters
+                 rpn_anchor_generator=None, rpn_head=None,
+                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
+                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
+                 rpn_nms_thresh=0.7,
+                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
+                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
+                 # Box parameters
+                 box_roi_pool=None, box_head=None, box_predictor=None,
+                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
+                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
+                 box_batch_size_per_image=512, box_positive_fraction=0.25,
+                 bbox_reg_weights=None):
+
+        if not hasattr(backbone, "out_channels"):
+            raise ValueError(
+                "backbone should contain an attribute out_channels "
+                "specifying the number of output channels (assumed to be the "
+                "same for all the levels)")
+
+        assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
+        assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
+
+        if num_classes is not None:
+            if box_predictor is not None:
+                raise ValueError("num_classes should be None when box_predictor is specified")
+        else:
+            if box_predictor is None:
+                raise ValueError("num_classes should not be None when box_predictor "
+                                 "is not specified")
+
+        out_channels = backbone.out_channels
+
+        if rpn_anchor_generator is None:
+            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
+            aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
+            rpn_anchor_generator = AnchorGenerator(
+                anchor_sizes, aspect_ratios
+            )
+        if rpn_head is None:
+            rpn_head = RPNHead(
+                out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
+            )
+
+        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
+        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
+
+        rpn = RegionProposalNetwork(
+            rpn_anchor_generator, rpn_head,
+            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
+            rpn_batch_size_per_image, rpn_positive_fraction,
+            rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh)
+
+        if box_roi_pool is None:
+            box_roi_pool = MultiScaleRoIAlign(
+                featmap_names=['0', '1', '2', '3'],
+                output_size=7,
+                sampling_ratio=2)
+
+        if box_head is None:
+            resolution = box_roi_pool.output_size[0]
+            representation_size = 1024
+            box_head = TwoMLPHead(
+                out_channels * resolution ** 2,
+                representation_size)
+
+        if box_predictor is None:
+            representation_size = 1024
+            box_predictor = FastRCNNPredictor(
+                representation_size,
+                num_classes)
+
+        roi_heads = RoIHeads(
+            # Box
+            box_roi_pool, box_head, box_predictor,
+            box_fg_iou_thresh, box_bg_iou_thresh,
+            box_batch_size_per_image, box_positive_fraction,
+            bbox_reg_weights,
+            box_score_thresh, box_nms_thresh, box_detections_per_img)
+
+        if image_mean is None:
+            image_mean = [0.485, 0.456, 0.406]
+        if image_std is None:
+            image_std = [0.229, 0.224, 0.225]
+        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
+
+        super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)
+
+
+class TwoMLPHead(nn.Module):
+    """
+    Standard heads for FPN-based models
+
+    Arguments:
+        in_channels (int): number of input channels
+        representation_size (int): size of the intermediate representation
+    """
+
+    def __init__(self, in_channels, representation_size):
+        super(TwoMLPHead, self).__init__()
+
+        self.fc6 = nn.Linear(in_channels, representation_size)
+        self.fc7 = nn.Linear(representation_size, representation_size)
+
+    def forward(self, x):
+        x = x.flatten(start_dim=1)
+
+        x = F.relu(self.fc6(x))
+        x = F.relu(self.fc7(x))
+
+        return x
+
+
+class FastRCNNPredictor(nn.Module):
+    """
+    Standard classification + bounding box regression layers
+    for Fast R-CNN.
+
+    Arguments:
+        in_channels (int): number of input channels
+        num_classes (int): number of output classes (including background)
+    """
+
+    def __init__(self, in_channels, num_classes):
+        super(FastRCNNPredictor, self).__init__()
+        self.cls_score = nn.Linear(in_channels, num_classes)
+        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
+
+    def forward(self, x):
+        if x.dim() == 4:
+            assert list(x.shape[2:]) == [1, 1]
+        x = x.flatten(start_dim=1)
+        scores = self.cls_score(x)
+        bbox_deltas = self.bbox_pred(x)
+
+        return scores, bbox_deltas
+
+
+model_urls = {
+    'fasterrcnn_resnet50_fpn_coco':
+        'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth',
+}
+
+
+
[docs]def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, + num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs): + """ + Constructs a Faster R-CNN model with a ResNet-50-FPN backbone. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x`` + between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H`` + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x`` + between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H`` + - labels (``Int64Tensor[N]``): the predicted labels for each image + - scores (``Tensor[N]``): the scores or each prediction + + Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. + + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) + >>> # For training + >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4) + >>> labels = torch.randint(1, 91, (4, 11)) + >>> images = list(image for image in images) + >>> targets = [] + >>> for i in range(len(images)): + >>> d = {} + >>> d['boxes'] = boxes[i] + >>> d['labels'] = labels[i] + >>> targets.append(d) + >>> output = model(images, targets) + >>> # For inference + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11) + + Arguments: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + num_classes (int): number of output classes of the model (including the background) + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. 
+ """ + assert trainable_backbone_layers <= 5 and trainable_backbone_layers >= 0 + # dont freeze any layers if pretrained model or backbone is not used + if not (pretrained or pretrained_backbone): + trainable_backbone_layers = 5 + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers) + model = FasterRCNN(backbone, num_classes, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'], + progress=progress) + model.load_state_dict(state_dict) + return model
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/detection/keypoint_rcnn.html b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/keypoint_rcnn.html
new file mode 100644
index 000000000000..7dbd87f30ec3
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/keypoint_rcnn.html
@@ -0,0 +1,887 @@
torchvision.models.detection.keypoint_rcnn — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.models.detection.keypoint_rcnn

+import torch
+from torch import nn
+
+from torchvision.ops import MultiScaleRoIAlign
+
+from ..utils import load_state_dict_from_url
+
+from .faster_rcnn import FasterRCNN
+from .backbone_utils import resnet_fpn_backbone
+
+
+__all__ = [
+    "KeypointRCNN", "keypointrcnn_resnet50_fpn"
+]
+
+
+class KeypointRCNN(FasterRCNN):
+    """
+    Implements Keypoint R-CNN.
+
+    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
+    image, and should be in 0-1 range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
+        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the class label for each ground-truth box
+        - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the
+          format [x, y, visibility], where visibility=0 means that the keypoint is not visible.
+
+    The model returns a Dict[Tensor] during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the keypoint loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
+    follows:
+        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
+        - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
+
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
+        num_classes (int): number of output classes of the model (including the background).
+            If box_predictor is specified, num_classes should be None.
+        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
+        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
+        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been trained.
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained.
+        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
+            maps.
+        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
+        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
+        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
+        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
+        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
+        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
+        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
+            considered as positive during training of the RPN.
+        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
+            considered as negative during training of the RPN.
+        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
+            for computing the loss
+        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
+            of the RPN
+        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
+            the locations indicated by the bounding boxes
+        box_head (nn.Module): module that takes the cropped feature maps as input
+        box_predictor (nn.Module): module that takes the output of box_head and returns the
+            classification logits and box regression deltas.
+        box_score_thresh (float): during inference, only return proposals with a classification score
+            greater than box_score_thresh
+        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
+        box_detections_per_img (int): maximum number of detections per image, for all classes.
+        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
+            considered as positive during training of the classification head
+        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
+            considered as negative during training of the classification head
+        box_batch_size_per_image (int): number of proposals that are sampled during training of the
+            classification head
+        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
+            of the classification head
+        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
+            bounding boxes
+        keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
+             the locations indicated by the bounding boxes, which will be used for the keypoint head.
+        keypoint_head (nn.Module): module that takes the cropped feature maps as input
+        keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the
+            heatmap logits
+
+    Example::
+
+        >>> import torch
+        >>> import torchvision
+        >>> from torchvision.models.detection import KeypointRCNN
+        >>> from torchvision.models.detection.rpn import AnchorGenerator
+        >>>
+        >>> # load a pre-trained model for classification and return
+        >>> # only the features
+        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
+        >>> # KeypointRCNN needs to know the number of
+        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
+        >>> # so we need to add it here
+        >>> backbone.out_channels = 1280
+        >>>
+        >>> # let's make the RPN generate 5 x 3 anchors per spatial
+        >>> # location, with 5 different sizes and 3 different aspect
+        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
+        >>> # map could potentially have different sizes and
+        >>> # aspect ratios
+        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
+        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
+        >>>
+        >>> # let's define what are the feature maps that we will
+        >>> # use to perform the region of interest cropping, as well as
+        >>> # the size of the crop after rescaling.
+        >>> # if your backbone returns a Tensor, featmap_names is expected to
+        >>> # be ['0']. More generally, the backbone should return an
+        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
+        >>> # feature maps to use.
+        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
+        >>>                                                 output_size=7,
+        >>>                                                 sampling_ratio=2)
+        >>>
+        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
+        >>>                                                          output_size=14,
+        >>>                                                          sampling_ratio=2)
+        >>> # put the pieces together inside a KeypointRCNN model
+        >>> model = KeypointRCNN(backbone,
+        >>>                      num_classes=2,
+        >>>                      rpn_anchor_generator=anchor_generator,
+        >>>                      box_roi_pool=roi_pooler,
+        >>>                      keypoint_roi_pool=keypoint_roi_pooler)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+    """
+    def __init__(self, backbone, num_classes=None,
+                 # transform parameters
+                 min_size=None, max_size=1333,
+                 image_mean=None, image_std=None,
+                 # RPN parameters
+                 rpn_anchor_generator=None, rpn_head=None,
+                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
+                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
+                 rpn_nms_thresh=0.7,
+                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
+                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
+                 # Box parameters
+                 box_roi_pool=None, box_head=None, box_predictor=None,
+                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
+                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
+                 box_batch_size_per_image=512, box_positive_fraction=0.25,
+                 bbox_reg_weights=None,
+                 # keypoint parameters
+                 keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None,
+                 num_keypoints=17):
+
+        assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None)))
+        if min_size is None:
+            min_size = (640, 672, 704, 736, 768, 800)
+
+        if num_classes is not None:
+            if keypoint_predictor is not None:
+                raise ValueError("num_classes should be None when keypoint_predictor is specified")
+
+        out_channels = backbone.out_channels
+
+        if keypoint_roi_pool is None:
+            keypoint_roi_pool = MultiScaleRoIAlign(
+                featmap_names=['0', '1', '2', '3'],
+                output_size=14,
+                sampling_ratio=2)
+
+        if keypoint_head is None:
+            keypoint_layers = tuple(512 for _ in range(8))
+            keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)
+
+        if keypoint_predictor is None:
+            keypoint_dim_reduced = 512  # == keypoint_layers[-1]
+            keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)
+
+        super(KeypointRCNN, self).__init__(
+            backbone, num_classes,
+            # transform parameters
+            min_size, max_size,
+            image_mean, image_std,
+            # RPN-specific parameters
+            rpn_anchor_generator, rpn_head,
+            rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,
+            rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,
+            rpn_nms_thresh,
+            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
+            rpn_batch_size_per_image, rpn_positive_fraction,
+            # Box parameters
+            box_roi_pool, box_head, box_predictor,
+            box_score_thresh, box_nms_thresh, box_detections_per_img,
+            box_fg_iou_thresh, box_bg_iou_thresh,
+            box_batch_size_per_image, box_positive_fraction,
+            bbox_reg_weights)
+
+        self.roi_heads.keypoint_roi_pool = keypoint_roi_pool
+        self.roi_heads.keypoint_head = keypoint_head
+        self.roi_heads.keypoint_predictor = keypoint_predictor
+
+
+class KeypointRCNNHeads(nn.Sequential):
+    def __init__(self, in_channels, layers):
+        d = []
+        next_feature = in_channels
+        for out_channels in layers:
+            d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
+            d.append(nn.ReLU(inplace=True))
+            next_feature = out_channels
+        super(KeypointRCNNHeads, self).__init__(*d)
+        for m in self.children():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+                nn.init.constant_(m.bias, 0)
+
+
+class KeypointRCNNPredictor(nn.Module):
+    def __init__(self, in_channels, num_keypoints):
+        super(KeypointRCNNPredictor, self).__init__()
+        input_features = in_channels
+        deconv_kernel = 4
+        self.kps_score_lowres = nn.ConvTranspose2d(
+            input_features,
+            num_keypoints,
+            deconv_kernel,
+            stride=2,
+            padding=deconv_kernel // 2 - 1,
+        )
+        nn.init.kaiming_normal_(
+            self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
+        )
+        nn.init.constant_(self.kps_score_lowres.bias, 0)
+        self.up_scale = 2
+        self.out_channels = num_keypoints
+
+    def forward(self, x):
+        x = self.kps_score_lowres(x)
+        return torch.nn.functional.interpolate(
+            x, scale_factor=float(self.up_scale), mode="bilinear", align_corners=False, recompute_scale_factor=False
+        )
+
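For orientation, the keypoint branch above turns each pooled 14×14 RoI into one heatmap per keypoint: the eight 3×3 convolutions preserve the spatial size, the transposed convolution doubles it to 28×28, and the bilinear upsampling doubles it again to 56×56. A minimal shape-check sketch, assuming 256 FPN channels and 17 COCO keypoints (the batch of 8 proposals is illustrative)::

    import torch

    head = KeypointRCNNHeads(in_channels=256, layers=tuple(512 for _ in range(8)))
    predictor = KeypointRCNNPredictor(in_channels=512, num_keypoints=17)

    rois = torch.rand(8, 256, 14, 14)      # 8 pooled proposals from MultiScaleRoIAlign
    heatmaps = predictor(head(rois))
    print(heatmaps.shape)                  # torch.Size([8, 17, 56, 56])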
+
+model_urls = {
+    # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606
+    'keypointrcnn_resnet50_fpn_coco_legacy':
+        'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth',
+    'keypointrcnn_resnet50_fpn_coco':
+        'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',
+}
+
+
+
[docs]def keypointrcnn_resnet50_fpn(pretrained=False, progress=True,
+                              num_classes=2, num_keypoints=17,
+                              pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):
+    """
+    Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    each containing:
+
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``
+          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+        - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoint locations for each of the ``N`` instances, in the
+          format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the keypoint loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows:
+
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``
+          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``
+        - labels (``Int64Tensor[N]``): the predicted labels for each image
+        - scores (``Tensor[N]``): the scores of each prediction
+        - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.
+
+    Keypoint R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
+
+    Example::
+
+        >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+        >>>
+        >>> # optionally, if you want to export the model to ONNX:
+        >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version=11)
+
+    Arguments:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017
+        progress (bool): If True, displays a progress bar of the download to stderr
+        pretrained_backbone (bool): If True, returns a model with backbone pre-trained on ImageNet
+        num_classes (int): number of output classes of the model (including the background)
+        trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
+            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
+    """
+    assert 0 <= trainable_backbone_layers <= 5
+    # don't freeze any layers if neither a pretrained model nor a pretrained backbone is used
+    if not (pretrained or pretrained_backbone):
+        trainable_backbone_layers = 5
+    if pretrained:
+        # no need to download the backbone if pretrained is set
+        pretrained_backbone = False
+    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)
+    model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)
+    if pretrained:
+        key = 'keypointrcnn_resnet50_fpn_coco'
+        if pretrained == 'legacy':
+            key += '_legacy'
+        state_dict = load_state_dict_from_url(model_urls[key],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/detection/mask_rcnn.html b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/mask_rcnn.html new file mode 100644 index 000000000000..c3aa4863f90a --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/detection/mask_rcnn.html @@ -0,0 +1,883 @@

Source code for torchvision.models.detection.mask_rcnn

+from collections import OrderedDict
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from torchvision.ops import misc as misc_nn_ops
+from torchvision.ops import MultiScaleRoIAlign
+
+from ..utils import load_state_dict_from_url
+
+from .faster_rcnn import FasterRCNN
+from .backbone_utils import resnet_fpn_backbone
+
+__all__ = [
+    "MaskRCNN", "maskrcnn_resnet50_fpn",
+]
+
+
+class MaskRCNN(FasterRCNN):
+    """
+    Implements Mask R-CNN.
+
+    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
+    image, and should be in 0-1 range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    each containing:
+        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the class label for each ground-truth box
+        - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance
+
+    The model returns a Dict[Tensor] during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the mask loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
+    follows:
+        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x
+          between 0 and W and values of y between 0 and H
+        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
+        - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
+          obtain the final segmentation masks, the soft masks can be thresholded, generally
+          with a value of 0.5 (mask >= 0.5)
+
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
+        num_classes (int): number of output classes of the model (including the background).
+            If box_predictor is specified, num_classes should be None.
+        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
+        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
+        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been trained
+            on
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained on
+        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
+            maps.
+        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
+        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
+        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
+        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
+        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
+        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
+        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
+            considered as positive during training of the RPN.
+        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
+            considered as negative during training of the RPN.
+        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
+            for computing the loss
+        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
+            of the RPN
+        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
+            the locations indicated by the bounding boxes
+        box_head (nn.Module): module that takes the cropped feature maps as input
+        box_predictor (nn.Module): module that takes the output of box_head and returns the
+            classification logits and box regression deltas.
+        box_score_thresh (float): during inference, only return proposals with a classification score
+            greater than box_score_thresh
+        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
+        box_detections_per_img (int): maximum number of detections per image, for all classes.
+        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
+            considered as positive during training of the classification head
+        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
+            considered as negative during training of the classification head
+        box_batch_size_per_image (int): number of proposals that are sampled during training of the
+            classification head
+        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
+            of the classification head
+        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
+            bounding boxes
+        mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
+             the locations indicated by the bounding boxes, which will be used for the mask head.
+        mask_head (nn.Module): module that takes the cropped feature maps as input
+        mask_predictor (nn.Module): module that takes the output of the mask_head and returns the
+            segmentation mask logits
+
+    Example::
+
+        >>> import torch
+        >>> import torchvision
+        >>> from torchvision.models.detection import MaskRCNN
+        >>> from torchvision.models.detection.rpn import AnchorGenerator
+        >>>
+        >>> # load a pre-trained model for classification and return
+        >>> # only the features
+        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
+        >>> # MaskRCNN needs to know the number of
+        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
+        >>> # so we need to add it here
+        >>> backbone.out_channels = 1280
+        >>>
+        >>> # let's make the RPN generate 5 x 3 anchors per spatial
+        >>> # location, with 5 different sizes and 3 different aspect
+        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
+        >>> # map could potentially have different sizes and
+        >>> # aspect ratios
+        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
+        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
+        >>>
+        >>> # let's define what are the feature maps that we will
+        >>> # use to perform the region of interest cropping, as well as
+        >>> # the size of the crop after rescaling.
+        >>> # if your backbone returns a Tensor, featmap_names is expected to
+        >>> # be ['0']. More generally, the backbone should return an
+        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
+        >>> # feature maps to use.
+        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
+        >>>                                                 output_size=7,
+        >>>                                                 sampling_ratio=2)
+        >>>
+        >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
+        >>>                                                      output_size=14,
+        >>>                                                      sampling_ratio=2)
+        >>> # put the pieces together inside a MaskRCNN model
+        >>> model = MaskRCNN(backbone,
+        >>>                  num_classes=2,
+        >>>                  rpn_anchor_generator=anchor_generator,
+        >>>                  box_roi_pool=roi_pooler,
+        >>>                  mask_roi_pool=mask_roi_pooler)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+    """
+    def __init__(self, backbone, num_classes=None,
+                 # transform parameters
+                 min_size=800, max_size=1333,
+                 image_mean=None, image_std=None,
+                 # RPN parameters
+                 rpn_anchor_generator=None, rpn_head=None,
+                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
+                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
+                 rpn_nms_thresh=0.7,
+                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
+                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
+                 # Box parameters
+                 box_roi_pool=None, box_head=None, box_predictor=None,
+                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
+                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
+                 box_batch_size_per_image=512, box_positive_fraction=0.25,
+                 bbox_reg_weights=None,
+                 # Mask parameters
+                 mask_roi_pool=None, mask_head=None, mask_predictor=None):
+
+        assert isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None)))
+
+        if num_classes is not None:
+            if mask_predictor is not None:
+                raise ValueError("num_classes should be None when mask_predictor is specified")
+
+        out_channels = backbone.out_channels
+
+        if mask_roi_pool is None:
+            mask_roi_pool = MultiScaleRoIAlign(
+                featmap_names=['0', '1', '2', '3'],
+                output_size=14,
+                sampling_ratio=2)
+
+        if mask_head is None:
+            mask_layers = (256, 256, 256, 256)
+            mask_dilation = 1
+            mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)
+
+        if mask_predictor is None:
+            mask_predictor_in_channels = 256  # == mask_layers[-1]
+            mask_dim_reduced = 256
+            mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels,
+                                               mask_dim_reduced, num_classes)
+
+        super(MaskRCNN, self).__init__(
+            backbone, num_classes,
+            # transform parameters
+            min_size, max_size,
+            image_mean, image_std,
+            # RPN-specific parameters
+            rpn_anchor_generator, rpn_head,
+            rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,
+            rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,
+            rpn_nms_thresh,
+            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
+            rpn_batch_size_per_image, rpn_positive_fraction,
+            # Box parameters
+            box_roi_pool, box_head, box_predictor,
+            box_score_thresh, box_nms_thresh, box_detections_per_img,
+            box_fg_iou_thresh, box_bg_iou_thresh,
+            box_batch_size_per_image, box_positive_fraction,
+            bbox_reg_weights)
+
+        self.roi_heads.mask_roi_pool = mask_roi_pool
+        self.roi_heads.mask_head = mask_head
+        self.roi_heads.mask_predictor = mask_predictor
+
+
+class MaskRCNNHeads(nn.Sequential):
+    def __init__(self, in_channels, layers, dilation):
+        """
+        Arguments:
+            in_channels (int): number of input channels
+            layers (list): feature dimensions of each FCN layer
+            dilation (int): dilation rate of kernel
+        """
+        d = OrderedDict()
+        next_feature = in_channels
+        for layer_idx, layer_features in enumerate(layers, 1):
+            d["mask_fcn{}".format(layer_idx)] = nn.Conv2d(
+                next_feature, layer_features, kernel_size=3,
+                stride=1, padding=dilation, dilation=dilation)
+            d["relu{}".format(layer_idx)] = nn.ReLU(inplace=True)
+            next_feature = layer_features
+
+        super(MaskRCNNHeads, self).__init__(d)
+        for name, param in self.named_parameters():
+            if "weight" in name:
+                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
+            # elif "bias" in name:
+            #     nn.init.constant_(param, 0)
+
+
+class MaskRCNNPredictor(nn.Sequential):
+    def __init__(self, in_channels, dim_reduced, num_classes):
+        super(MaskRCNNPredictor, self).__init__(OrderedDict([
+            ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
+            ("relu", nn.ReLU(inplace=True)),
+            ("mask_fcn_logits", nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
+        ]))
+
+        for name, param in self.named_parameters():
+            if "weight" in name:
+                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
+            # elif "bias" in name:
+            #     nn.init.constant_(param, 0)
+
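For orientation, the default mask branch above maps each pooled 14×14 RoI to per-class 28×28 mask logits: the four 3×3 convolutions keep the spatial size, the transposed convolution doubles it, and the final 1×1 convolution produces one channel per class. A minimal shape-check sketch, assuming 256 FPN channels and 91 COCO classes (the batch of 4 proposals is illustrative)::

    import torch

    mask_head = MaskRCNNHeads(in_channels=256, layers=(256, 256, 256, 256), dilation=1)
    mask_predictor = MaskRCNNPredictor(in_channels=256, dim_reduced=256, num_classes=91)

    rois = torch.rand(4, 256, 14, 14)       # 4 pooled proposals from MultiScaleRoIAlign
    mask_logits = mask_predictor(mask_head(rois))
    print(mask_logits.shape)                # torch.Size([4, 91, 28, 28])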
+
+model_urls = {
+    'maskrcnn_resnet50_fpn_coco':
+        'https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth',
+}
+
+
+
[docs]def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
+                          num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):
+    """
+    Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    each containing:
+
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``
+          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+        - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the mask loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows:
+
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``
+          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``
+        - labels (``Int64Tensor[N]``): the predicted labels for each image
+        - scores (``Tensor[N]``): the scores of each prediction
+        - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to
+          obtain the final segmentation masks, the soft masks can be thresholded, generally
+          with a value of 0.5 (``mask >= 0.5``)
+
+    Mask R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
+
+    Example::
+
+        >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+        >>>
+        >>> # optionally, if you want to export the model to ONNX:
+        >>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version=11)
+
+    Arguments:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017
+        progress (bool): If True, displays a progress bar of the download to stderr
+        pretrained_backbone (bool): If True, returns a model with backbone pre-trained on ImageNet
+        num_classes (int): number of output classes of the model (including the background)
+        trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
+            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
+    """
+    assert 0 <= trainable_backbone_layers <= 5
+    # don't freeze any layers if neither a pretrained model nor a pretrained backbone is used
+    if not (pretrained or pretrained_backbone):
+        trainable_backbone_layers = 5
+    if pretrained:
+        # no need to download the backbone if pretrained is set
+        pretrained_backbone = False
+    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)
+    model = MaskRCNN(backbone, num_classes, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
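A hedged sketch of the post-processing mentioned in the docstring: thresholding the soft ``[N, 1, H, W]`` masks at 0.5 to obtain binary segmentation masks. ``pretrained=False`` here avoids any download; with random weights the number of detections ``N`` may well be zero, but the code still runs::

    import torch
    import torchvision

    model = torchvision.models.detection.maskrcnn_resnet50_fpn(
        pretrained=False, pretrained_backbone=False)
    model.eval()

    with torch.no_grad():
        predictions = model([torch.rand(3, 300, 400)])

    soft_masks = predictions[0]["masks"]     # FloatTensor[N, 1, H, W], values in [0, 1]
    binary_masks = soft_masks >= 0.5         # BoolTensor[N, 1, H, W], final segmentation masks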
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/googlenet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/googlenet.html new file mode 100644 index 000000000000..db7672ba8cc8 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/googlenet.html @@ -0,0 +1,846 @@

Source code for torchvision.models.googlenet

+import warnings
+from collections import namedtuple
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.jit.annotations import Optional, Tuple
+from torch import Tensor
+from .utils import load_state_dict_from_url
+
+__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]
+
+model_urls = {
+    # GoogLeNet ported from TensorFlow
+    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
+}
+
+GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
+GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
+                                    'aux_logits1': Optional[Tensor]}
+
+# Script annotations failed with _GoogleNetOutputs = namedtuple ...
+# _GoogLeNetOutputs set here for backwards compat
+_GoogLeNetOutputs = GoogLeNetOutputs
+
+
+
[docs]def googlenet(pretrained=False, progress=True, **kwargs):
+    r"""GoogLeNet (Inception v1) model architecture from
+    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
+            Default: *False* when pretrained is True, otherwise *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if 'transform_input' not in kwargs:
+            kwargs['transform_input'] = True
+        if 'aux_logits' not in kwargs:
+            kwargs['aux_logits'] = False
+        if kwargs['aux_logits']:
+            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
+                          'so make sure to train them')
+        original_aux_logits = kwargs['aux_logits']
+        kwargs['aux_logits'] = True
+        kwargs['init_weights'] = False
+        model = GoogLeNet(**kwargs)
+        state_dict = load_state_dict_from_url(model_urls['googlenet'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+        if not original_aux_logits:
+            model.aux_logits = False
+            model.aux1 = None
+            model.aux2 = None
+        return model
+
+    return GoogLeNet(**kwargs)
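A minimal usage sketch for the constructor above, assuming ImageNet-style 224×224 inputs; ``pretrained=False`` avoids the weight download and ``init_weights=False`` skips the slow scipy-based initialization mentioned in the class warning below::

    import torch
    import torchvision

    model = torchvision.models.googlenet(pretrained=False, aux_logits=True, init_weights=False)
    model.eval()                             # eval mode returns plain logits, no auxiliary outputs

    with torch.no_grad():
        logits = model(torch.rand(1, 3, 224, 224))
    print(logits.shape)                      # torch.Size([1, 1000])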
+ + +class GoogLeNet(nn.Module): + __constants__ = ['aux_logits', 'transform_input'] + + def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=None, + blocks=None): + super(GoogLeNet, self).__init__() + if blocks is None: + blocks = [BasicConv2d, Inception, InceptionAux] + if init_weights is None: + warnings.warn('The default weight initialization of GoogleNet will be changed in future releases of ' + 'torchvision. If you wish to keep the old behavior (which leads to long initialization times' + ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning) + init_weights = True + assert len(blocks) == 3 + conv_block = blocks[0] + inception_block = blocks[1] + inception_aux_block = blocks[2] + + self.aux_logits = aux_logits + self.transform_input = transform_input + + self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3) + self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.conv2 = conv_block(64, 64, kernel_size=1) + self.conv3 = conv_block(64, 192, kernel_size=3, padding=1) + self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32) + self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64) + self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64) + self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64) + self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64) + self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64) + self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128) + self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128) + self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128) + + if aux_logits: + self.aux1 = inception_aux_block(512, num_classes) + self.aux2 = inception_aux_block(528, num_classes) + else: + self.aux1 = None + self.aux2 = None + + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout(0.2) + self.fc = nn.Linear(1024, num_classes) + + if init_weights: + self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + X = stats.truncnorm(-2, 2, scale=0.01) + values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) + values = values.view(m.weight.size()) + with torch.no_grad(): + m.weight.copy_(values) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _transform_input(self, x): + # type: (Tensor) -> Tensor + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + return x + + def _forward(self, x): + # type: (Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]] + # N x 3 x 224 x 224 + x = self.conv1(x) + # N x 64 x 112 x 112 + x = self.maxpool1(x) + # N x 64 x 56 x 56 + x = self.conv2(x) + # N x 64 x 56 x 56 + x = self.conv3(x) + # N x 192 x 56 x 56 + x = self.maxpool2(x) + + # N x 192 x 28 x 28 + x = self.inception3a(x) + # N x 256 x 28 x 28 + x = self.inception3b(x) + # N x 480 x 28 x 28 + x = self.maxpool3(x) + # N x 480 x 14 x 14 + x = self.inception4a(x) + # 
N x 512 x 14 x 14 + aux1 = torch.jit.annotate(Optional[Tensor], None) + if self.aux1 is not None: + if self.training: + aux1 = self.aux1(x) + + x = self.inception4b(x) + # N x 512 x 14 x 14 + x = self.inception4c(x) + # N x 512 x 14 x 14 + x = self.inception4d(x) + # N x 528 x 14 x 14 + aux2 = torch.jit.annotate(Optional[Tensor], None) + if self.aux2 is not None: + if self.training: + aux2 = self.aux2(x) + + x = self.inception4e(x) + # N x 832 x 14 x 14 + x = self.maxpool4(x) + # N x 832 x 7 x 7 + x = self.inception5a(x) + # N x 832 x 7 x 7 + x = self.inception5b(x) + # N x 1024 x 7 x 7 + + x = self.avgpool(x) + # N x 1024 x 1 x 1 + x = torch.flatten(x, 1) + # N x 1024 + x = self.dropout(x) + x = self.fc(x) + # N x 1000 (num_classes) + return x, aux2, aux1 + + @torch.jit.unused + def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs: + if self.training and self.aux_logits: + return _GoogLeNetOutputs(x, aux2, aux1) + else: + return x # type: ignore[return-value] + + def forward(self, x): + # type: (Tensor) -> GoogLeNetOutputs + x = self._transform_input(x) + x, aux1, aux2 = self._forward(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple") + return GoogLeNetOutputs(x, aux2, aux1) + else: + return self.eager_outputs(x, aux2, aux1) + + +class Inception(nn.Module): + + def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj, + conv_block=None): + super(Inception, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1) + + self.branch2 = nn.Sequential( + conv_block(in_channels, ch3x3red, kernel_size=1), + conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1) + ) + + self.branch3 = nn.Sequential( + conv_block(in_channels, ch5x5red, kernel_size=1), + # Here, kernel_size=3 instead of kernel_size=5 is a known bug. + # Please see https://github.com/pytorch/vision/issues/906 for details. 
+ conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1) + ) + + self.branch4 = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True), + conv_block(in_channels, pool_proj, kernel_size=1) + ) + + def _forward(self, x): + branch1 = self.branch1(x) + branch2 = self.branch2(x) + branch3 = self.branch3(x) + branch4 = self.branch4(x) + + outputs = [branch1, branch2, branch3, branch4] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv = conv_block(in_channels, 128, kernel_size=1) + + self.fc1 = nn.Linear(2048, 1024) + self.fc2 = nn.Linear(1024, num_classes) + + def forward(self, x): + # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 + x = F.adaptive_avg_pool2d(x, (4, 4)) + # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 + x = self.conv(x) + # N x 128 x 4 x 4 + x = torch.flatten(x, 1) + # N x 2048 + x = F.relu(self.fc1(x), inplace=True) + # N x 1024 + x = F.dropout(x, 0.7, training=self.training) + # N x 1024 + x = self.fc2(x) + # N x 1000 (num_classes) + + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) +
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/inception.html b/docs/1.7.0/torchvision/_modules/torchvision/models/inception.html new file mode 100644 index 000000000000..fba7f67640ee --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/inception.html @@ -0,0 +1,993 @@

Source code for torchvision.models.inception

+from collections import namedtuple
+import warnings
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.jit.annotations import Optional
+from torch import Tensor
+from .utils import load_state_dict_from_url
+
+
+__all__ = ['Inception3', 'inception_v3', 'InceptionOutputs', '_InceptionOutputs']
+
+
+model_urls = {
+    # Inception v3 ported from TensorFlow
+    'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
+}
+
+InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits'])
+InceptionOutputs.__annotations__ = {'logits': torch.Tensor, 'aux_logits': Optional[torch.Tensor]}
+
+# Script annotations failed with _GoogleNetOutputs = namedtuple ...
+# _InceptionOutputs set here for backwards compat
+_InceptionOutputs = InceptionOutputs
+
+
+
[docs]def inception_v3(pretrained=False, progress=True, **kwargs):
+    r"""Inception v3 model architecture from
+    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
+
+    .. note::
+        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
+        N x 3 x 299 x 299, so ensure your images are sized accordingly.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        aux_logits (bool): If True, adds an auxiliary branch that can improve training.
+            Default: *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if 'transform_input' not in kwargs:
+            kwargs['transform_input'] = True
+        if 'aux_logits' in kwargs:
+            original_aux_logits = kwargs['aux_logits']
+            kwargs['aux_logits'] = True
+        else:
+            original_aux_logits = True
+        kwargs['init_weights'] = False  # we are loading weights from a pretrained model
+        model = Inception3(**kwargs)
+        state_dict = load_state_dict_from_url(model_urls['inception_v3_google'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+        if not original_aux_logits:
+            model.aux_logits = False
+            del model.AuxLogits
+        return model
+
+    return Inception3(**kwargs)
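A minimal usage sketch for the constructor above, highlighting the ``N x 3 x 299 x 299`` input size from the note; ``pretrained=False`` avoids the weight download and ``init_weights=False`` skips the slow scipy-based initialization::

    import torch
    import torchvision

    model = torchvision.models.inception_v3(pretrained=False, aux_logits=True, init_weights=False)
    model.eval()                             # eval mode returns plain logits, no auxiliary output

    with torch.no_grad():
        logits = model(torch.rand(1, 3, 299, 299))   # note the 299 x 299 input size
    print(logits.shape)                      # torch.Size([1, 1000])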
+ + +class Inception3(nn.Module): + + def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, + inception_blocks=None, init_weights=None): + super(Inception3, self).__init__() + if inception_blocks is None: + inception_blocks = [ + BasicConv2d, InceptionA, InceptionB, InceptionC, + InceptionD, InceptionE, InceptionAux + ] + if init_weights is None: + warnings.warn('The default weight initialization of inception_v3 will be changed in future releases of ' + 'torchvision. If you wish to keep the old behavior (which leads to long initialization times' + ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning) + init_weights = True + assert len(inception_blocks) == 7 + conv_block = inception_blocks[0] + inception_a = inception_blocks[1] + inception_b = inception_blocks[2] + inception_c = inception_blocks[3] + inception_d = inception_blocks[4] + inception_e = inception_blocks[5] + inception_aux = inception_blocks[6] + + self.aux_logits = aux_logits + self.transform_input = transform_input + self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) + self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = inception_a(192, pool_features=32) + self.Mixed_5c = inception_a(256, pool_features=64) + self.Mixed_5d = inception_a(288, pool_features=64) + self.Mixed_6a = inception_b(288) + self.Mixed_6b = inception_c(768, channels_7x7=128) + self.Mixed_6c = inception_c(768, channels_7x7=160) + self.Mixed_6d = inception_c(768, channels_7x7=160) + self.Mixed_6e = inception_c(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = inception_aux(768, num_classes) + self.Mixed_7a = inception_d(768) + self.Mixed_7b = inception_e(1280) + self.Mixed_7c = inception_e(2048) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout() + self.fc = nn.Linear(2048, num_classes) + if init_weights: + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + X = stats.truncnorm(-2, 2, scale=stddev) + values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) + values = values.view(m.weight.size()) + with torch.no_grad(): + m.weight.copy_(values) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _transform_input(self, x): + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + return x + + def _forward(self, x): + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.maxpool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.maxpool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 
17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + aux_defined = self.training and self.aux_logits + if aux_defined: + aux = self.AuxLogits(x) + else: + aux = None + # N x 768 x 17 x 17 + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + # Adaptive average pooling + x = self.avgpool(x) + # N x 2048 x 1 x 1 + x = self.dropout(x) + # N x 2048 x 1 x 1 + x = torch.flatten(x, 1) + # N x 2048 + x = self.fc(x) + # N x 1000 (num_classes) + return x, aux + + @torch.jit.unused + def eager_outputs(self, x: torch.Tensor, aux: Optional[Tensor]) -> InceptionOutputs: + if self.training and self.aux_logits: + return InceptionOutputs(x, aux) + else: + return x # type: ignore[return-value] + + def forward(self, x): + x = self._transform_input(x) + x, aux = self._forward(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted Inception3 always returns Inception3 Tuple") + return InceptionOutputs(x, aux) + else: + return self.eager_outputs(x, aux) + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 
= conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + 
self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + self.fc = nn.Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) +
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/mnasnet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/mnasnet.html new file mode 100644 index 000000000000..84206312b1ee --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/mnasnet.html @@ -0,0 +1,809 @@

Source code for torchvision.models.mnasnet

+import warnings
+
+import torch
+import torch.nn as nn
+from .utils import load_state_dict_from_url
+
+__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3']
+
+_MODEL_URLS = {
+    "mnasnet0_5":
+    "https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
+    "mnasnet0_75": None,
+    "mnasnet1_0":
+    "https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
+    "mnasnet1_3": None
+}
+
+# The paper suggests a momentum of 0.9997 for TensorFlow BatchNorm. PyTorch defines
+# momentum as the complement of TensorFlow's, so the equivalent value is 1 - 0.9997.
+_BN_MOMENTUM = 1 - 0.9997
+
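A small illustration of the convention above: with such a tiny momentum, the running statistics move only a tiny fraction of the way toward each batch's statistics (the layer size and input here are arbitrary)::

    import torch
    import torch.nn as nn

    bn = nn.BatchNorm2d(8, momentum=_BN_MOMENTUM)
    batch = torch.randn(4, 8, 16, 16) * 3.0 + 5.0   # batch mean is roughly 5
    bn.train()
    _ = bn(batch)
    # running_mean = (1 - _BN_MOMENTUM) * 0 + _BN_MOMENTUM * batch_mean, i.e. barely above 0
    print(bn.running_mean.mean().item())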
+
+class _InvertedResidual(nn.Module):
+
+    def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor,
+                 bn_momentum=0.1):
+        super(_InvertedResidual, self).__init__()
+        assert stride in [1, 2]
+        assert kernel_size in [3, 5]
+        mid_ch = in_ch * expansion_factor
+        self.apply_residual = (in_ch == out_ch and stride == 1)
+        self.layers = nn.Sequential(
+            # Pointwise
+            nn.Conv2d(in_ch, mid_ch, 1, bias=False),
+            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
+            nn.ReLU(inplace=True),
+            # Depthwise
+            nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
+                      stride=stride, groups=mid_ch, bias=False),
+            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
+            nn.ReLU(inplace=True),
+            # Linear pointwise. Note that there's no activation.
+            nn.Conv2d(mid_ch, out_ch, 1, bias=False),
+            nn.BatchNorm2d(out_ch, momentum=bn_momentum))
+
+    def forward(self, input):
+        if self.apply_residual:
+            return self.layers(input) + input
+        else:
+            return self.layers(input)
+
+
+def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats,
+           bn_momentum):
+    """ Creates a stack of inverted residuals. """
+    assert repeats >= 1
+    # First one has no skip, because feature map size changes.
+    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
+                              bn_momentum=bn_momentum)
+    remaining = []
+    for _ in range(1, repeats):
+        remaining.append(
+            _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor,
+                              bn_momentum=bn_momentum))
+    return nn.Sequential(first, *remaining)
+
+
+def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
+    """ Asymmetric rounding to make `val` divisible by `divisor`. With default
+    bias, will round up, unless the number is no more than 10% greater than the
+    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """
+    assert 0.0 < round_up_bias < 1.0
+    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
+    return new_val if new_val >= round_up_bias * val else new_val + divisor
+
+
+def _get_depths(alpha):
+    """ Scales tensor depths as in reference MobileNet code, prefers rouding up
+    rather than down. """
+    depths = [32, 16, 24, 40, 80, 96, 192, 320]
+    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
+
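Quick sanity checks for the two helpers above, using the values from the ``_round_to_multiple_of`` docstring and the unscaled depth table::

    assert _round_to_multiple_of(83, 8) == 80    # nearest multiple (80) is within 10% of 83, keep it
    assert _round_to_multiple_of(84, 8) == 88    # 84 already rounds up to 88 under half-up rounding
    assert _get_depths(1.0) == [32, 16, 24, 40, 80, 96, 192, 320]   # alpha=1.0 leaves depths unchanged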
+
+class MNASNet(torch.nn.Module):
+    """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
+    implements the B1 variant of the model.
+    >>> model = MNASNet(alpha=1.0, num_classes=1000)
+    >>> x = torch.rand(1, 3, 224, 224)
+    >>> y = model(x)
+    >>> y.dim()
+    2
+    >>> y.nelement()
+    1000
+    """
+    # Version 2 adds depth scaling in the initial stages of the network.
+    _version = 2
+
+    def __init__(self, alpha, num_classes=1000, dropout=0.2):
+        super(MNASNet, self).__init__()
+        assert alpha > 0.0
+        self.alpha = alpha
+        self.num_classes = num_classes
+        depths = _get_depths(alpha)
+        layers = [
+            # First layer: regular conv.
+            nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
+            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
+            nn.ReLU(inplace=True),
+            # Depthwise separable, no skip.
+            nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1,
+                      groups=depths[0], bias=False),
+            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
+            nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
+            # MNASNet blocks: stacks of inverted residuals.
+            _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
+            _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
+            _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
+            _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
+            _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
+            _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
+            # Final mapping to classifier input.
+            nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
+            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
+            nn.ReLU(inplace=True),
+        ]
+        self.layers = nn.Sequential(*layers)
+        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
+                                        nn.Linear(1280, num_classes))
+        self._initialize_weights()
+
+    def forward(self, x):
+        x = self.layers(x)
+        # Equivalent to global avgpool and removing H and W dimensions.
+        x = x.mean([2, 3])
+        return self.classifier(x)
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode="fan_out",
+                                        nonlinearity="relu")
+                if m.bias is not None:
+                    nn.init.zeros_(m.bias)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.ones_(m.weight)
+                nn.init.zeros_(m.bias)
+            elif isinstance(m, nn.Linear):
+                nn.init.kaiming_uniform_(m.weight, mode="fan_out",
+                                         nonlinearity="sigmoid")
+                nn.init.zeros_(m.bias)
+
+    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+                              missing_keys, unexpected_keys, error_msgs):
+        version = local_metadata.get("version", None)
+        assert version in [1, 2]
+
+        if version == 1 and not self.alpha == 1.0:
+            # In the initial version of the model (v1), stem was fixed-size.
+            # All other layer configurations were the same. This will patch
+            # the model so that it's identical to v1. Model with alpha 1.0 is
+            # unaffected.
+            depths = _get_depths(self.alpha)
+            v1_stem = [
+                nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
+                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32,
+                          bias=False),
+                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
+                nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
+                _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
+            ]
+            for idx, layer in enumerate(v1_stem):
+                self.layers[idx] = layer
+
+            # The model is now identical to v1, and must be saved as such.
+            self._version = 1
+            warnings.warn(
+                "A new version of MNASNet model has been implemented. "
+                "Your checkpoint was saved using the previous version. "
+                "This checkpoint will load and work as before, but "
+                "you may want to upgrade by training a newer model or "
+                "transfer learning from an updated ImageNet checkpoint.",
+                UserWarning)
+
+        super(MNASNet, self)._load_from_state_dict(
+            state_dict, prefix, local_metadata, strict, missing_keys,
+            unexpected_keys, error_msgs)
+
+
+def _load_pretrained(model_name, model, progress):
+    if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
+        raise ValueError(
+            "No checkpoint is available for model type {}".format(model_name))
+    checkpoint_url = _MODEL_URLS[model_name]
+    model.load_state_dict(
+        load_state_dict_from_url(checkpoint_url, progress=progress))
+
+
+
[docs]def mnasnet0_5(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 0.5 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.5, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_5", model, progress) + return model
+ + +
[docs]def mnasnet0_75(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 0.75 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.75, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_75", model, progress) + return model
+ + +
[docs]def mnasnet1_0(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 1.0 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.0, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_0", model, progress) + return model
+ + +
[docs]def mnasnet1_3(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 1.3 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.3, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_3", model, progress) + return model
+
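+
+# Editor's usage sketch (not part of the original module): build one of the
+# models defined above and run a dummy batch; pass pretrained=True to download
+# ImageNet weights where a checkpoint URL is available.
+if __name__ == "__main__":
+    import torch
+
+    net = mnasnet1_0(pretrained=False, num_classes=1000)
+    net.eval()
+    with torch.no_grad():
+        logits = net(torch.rand(1, 3, 224, 224))
+    print(logits.shape)  # torch.Size([1, 1000])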
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/mobilenet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/mobilenet.html new file mode 100644 index 000000000000..a3001179b075 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/mobilenet.html @@ -0,0 +1,740 @@ + + + + + + + + + + + + torchvision.models.mobilenet — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.mobilenet

+from torch import nn
+from .utils import load_state_dict_from_url
+
+
+__all__ = ['MobileNetV2', 'mobilenet_v2']
+
+
+model_urls = {
+    'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
+}
+
+
+def _make_divisible(v, divisor, min_value=None):
+    """
+    This function is taken from the original tf repo.
+    It ensures that all layers have a channel number that is divisible by 8
+    It can be seen here:
+    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+    :param v:
+    :param divisor:
+    :param min_value:
+    :return:
+    """
+    if min_value is None:
+        min_value = divisor
+    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+    # Make sure that round down does not go down by more than 10%.
+    if new_v < 0.9 * v:
+        new_v += divisor
+    return new_v
+
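+# Sample values for the helper above (editor's illustration, not part of the
+# original module): _make_divisible(32 * 0.75, 8) == 24,
+# _make_divisible(91, 8) == 88 and _make_divisible(11, 8) == 16 -- the result
+# is always a multiple of the divisor and never more than ~10% below the input.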
+
+class ConvBNReLU(nn.Sequential):
+    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
+        padding = (kernel_size - 1) // 2
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        super(ConvBNReLU, self).__init__(
+            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
+            norm_layer(out_planes),
+            nn.ReLU6(inplace=True)
+        )
+
+
+class InvertedResidual(nn.Module):
+    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
+        super(InvertedResidual, self).__init__()
+        self.stride = stride
+        assert stride in [1, 2]
+
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+
+        hidden_dim = int(round(inp * expand_ratio))
+        self.use_res_connect = self.stride == 1 and inp == oup
+
+        layers = []
+        if expand_ratio != 1:
+            # pw
+            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
+        layers.extend([
+            # dw
+            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
+            # pw-linear
+            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+            norm_layer(oup),
+        ])
+        self.conv = nn.Sequential(*layers)
+
+    def forward(self, x):
+        if self.use_res_connect:
+            return x + self.conv(x)
+        else:
+            return self.conv(x)
+
+
+class MobileNetV2(nn.Module):
+    def __init__(self,
+                 num_classes=1000,
+                 width_mult=1.0,
+                 inverted_residual_setting=None,
+                 round_nearest=8,
+                 block=None,
+                 norm_layer=None):
+        """
+        MobileNet V2 main class
+
+        Args:
+            num_classes (int): Number of classes
+            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
+            inverted_residual_setting: Network structure
+            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
+            Set to 1 to turn off rounding
+            block: Module specifying inverted residual building block for mobilenet
+            norm_layer: Module specifying the normalization layer to use
+
+        """
+        super(MobileNetV2, self).__init__()
+
+        if block is None:
+            block = InvertedResidual
+
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+
+        input_channel = 32
+        last_channel = 1280
+
+        if inverted_residual_setting is None:
+            inverted_residual_setting = [
+                # t, c, n, s
+                [1, 16, 1, 1],
+                [6, 24, 2, 2],
+                [6, 32, 3, 2],
+                [6, 64, 4, 2],
+                [6, 96, 3, 1],
+                [6, 160, 3, 2],
+                [6, 320, 1, 1],
+            ]
+
+        # only check the first element, assuming user knows t,c,n,s are required
+        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
+            raise ValueError("inverted_residual_setting should be non-empty "
+                             "or a 4-element list, got {}".format(inverted_residual_setting))
+
+        # building first layer
+        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
+        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
+        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
+        # building inverted residual blocks
+        for t, c, n, s in inverted_residual_setting:
+            output_channel = _make_divisible(c * width_mult, round_nearest)
+            for i in range(n):
+                stride = s if i == 0 else 1
+                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
+                input_channel = output_channel
+        # building last several layers
+        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
+        # make it nn.Sequential
+        self.features = nn.Sequential(*features)
+
+        # building classifier
+        self.classifier = nn.Sequential(
+            nn.Dropout(0.2),
+            nn.Linear(self.last_channel, num_classes),
+        )
+
+        # weight initialization
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out')
+                if m.bias is not None:
+                    nn.init.zeros_(m.bias)
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.ones_(m.weight)
+                nn.init.zeros_(m.bias)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0, 0.01)
+                nn.init.zeros_(m.bias)
+
+    def _forward_impl(self, x):
+        # This exists since TorchScript doesn't support inheritance, so the superclass method
+        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
+        x = self.features(x)
+        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
+        x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
+        x = self.classifier(x)
+        return x
+
+    def forward(self, x):
+        return self._forward_impl(x)
+
+
+
[docs]def mobilenet_v2(pretrained=False, progress=True, **kwargs): + """ + Constructs a MobileNetV2 architecture from + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MobileNetV2(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], + progress=progress) + model.load_state_dict(state_dict) + return model
+
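+
+# Editor's usage sketch (not part of the original module): the defaults above
+# reproduce the standard MobileNetV2; width_mult scales the channel counts.
+if __name__ == "__main__":
+    import torch
+
+    net = mobilenet_v2(pretrained=False, width_mult=1.0)
+    net.eval()
+    with torch.no_grad():
+        out = net(torch.rand(1, 3, 224, 224))
+    print(out.shape)  # torch.Size([1, 1000])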
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/resnet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/resnet.html new file mode 100644 index 000000000000..09a585f3cacf --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/resnet.html @@ -0,0 +1,905 @@ + + + + + + + + + + + + torchvision.models.resnet — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.resnet

+import torch
+import torch.nn as nn
+from .utils import load_state_dict_from_url
+
+
+__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
+           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
+           'wide_resnet50_2', 'wide_resnet101_2']
+
+
+model_urls = {
+    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
+    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
+    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
+    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
+    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
+    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
+    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
+    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
+    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
+}
+
+
+def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+    """3x3 convolution with padding"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+                     padding=dilation, groups=groups, bias=False, dilation=dilation)
+
+
+def conv1x1(in_planes, out_planes, stride=1):
+    """1x1 convolution"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+                 base_width=64, dilation=1, norm_layer=None):
+        super(BasicBlock, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        if groups != 1 or base_width != 64:
+            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
+        if dilation > 1:
+            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
+        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = norm_layer(planes)
+        self.relu = nn.ReLU(inplace=True)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = norm_layer(planes)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
+    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
+    # according to "Deep residual learning for image recognition", https://arxiv.org/abs/1512.03385.
+    # This variant is also known as ResNet V1.5 and improves accuracy according to
+    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+                 base_width=64, dilation=1, norm_layer=None):
+        super(Bottleneck, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        width = int(planes * (base_width / 64.)) * groups
+        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+        self.conv1 = conv1x1(inplanes, width)
+        self.bn1 = norm_layer(width)
+        self.conv2 = conv3x3(width, width, stride, groups, dilation)
+        self.bn2 = norm_layer(width)
+        self.conv3 = conv1x1(width, planes * self.expansion)
+        self.bn3 = norm_layer(planes * self.expansion)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.relu(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class ResNet(nn.Module):
+
+    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
+                 norm_layer=None):
+        super(ResNet, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        self._norm_layer = norm_layer
+
+        self.inplanes = 64
+        self.dilation = 1
+        if replace_stride_with_dilation is None:
+            # each element in the tuple indicates if we should replace
+            # the 2x2 stride with a dilated convolution instead
+            replace_stride_with_dilation = [False, False, False]
+        if len(replace_stride_with_dilation) != 3:
+            raise ValueError("replace_stride_with_dilation should be None "
+                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+        self.groups = groups
+        self.base_width = width_per_group
+        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
+                               bias=False)
+        self.bn1 = norm_layer(self.inplanes)
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
+                                       dilate=replace_stride_with_dilation[0])
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
+                                       dilate=replace_stride_with_dilation[1])
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
+                                       dilate=replace_stride_with_dilation[2])
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+        # Zero-initialize the last BN in each residual branch,
+        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)
+                elif isinstance(m, BasicBlock):
+                    nn.init.constant_(m.bn2.weight, 0)
+
+    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+        norm_layer = self._norm_layer
+        downsample = None
+        previous_dilation = self.dilation
+        if dilate:
+            self.dilation *= stride
+            stride = 1
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                norm_layer(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                            self.base_width, previous_dilation, norm_layer))
+        self.inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes, groups=self.groups,
+                                base_width=self.base_width, dilation=self.dilation,
+                                norm_layer=norm_layer))
+
+        return nn.Sequential(*layers)
+
+    def _forward_impl(self, x):
+        # See note [TorchScript super()]
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.fc(x)
+
+        return x
+
+    def forward(self, x):
+        return self._forward_impl(x)
+
+
+def _resnet(arch, block, layers, pretrained, progress, **kwargs):
+    model = ResNet(block, layers, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
+
+
+
[docs]def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs)
+ + +
[docs]def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs)
+ + +
[docs]def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs)
+ + +
[docs]def resnet101(pretrained=False, progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs)
+ + +
[docs]def resnet152(pretrained=False, progress=True, **kwargs): + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs)
+ + +
[docs]def resnext50_32x4d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-50 32x4d model from + `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs)
+ + +
[docs]def resnext101_32x8d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs)
+ + +
[docs]def wide_resnet50_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-50-2 model from + `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs)
+ + +
[docs]def wide_resnet101_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-101-2 model from + `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs)
+
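+
+# Editor's usage sketch (not part of the original module): the factory
+# functions above return a plain nn.Module; swapping the final fully connected
+# layer is the usual way to fine-tune on a different number of classes.
+if __name__ == "__main__":
+    import torch
+
+    net = resnet18(pretrained=False)
+    net.fc = nn.Linear(net.fc.in_features, 10)  # e.g. adapt the head to 10 classes
+    net.eval()
+    with torch.no_grad():
+        out = net(torch.rand(2, 3, 224, 224))
+    print(out.shape)  # torch.Size([2, 10])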
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/segmentation/segmentation.html b/docs/1.7.0/torchvision/_modules/torchvision/models/segmentation/segmentation.html new file mode 100644 index 000000000000..0d3723bb9d7d --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/segmentation/segmentation.html @@ -0,0 +1,658 @@ + + + + + + + + + + + + torchvision.models.segmentation.segmentation — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.segmentation.segmentation

+from .._utils import IntermediateLayerGetter
+from ..utils import load_state_dict_from_url
+from .. import resnet
+from .deeplabv3 import DeepLabHead, DeepLabV3
+from .fcn import FCN, FCNHead
+
+
+__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']
+
+
+model_urls = {
+    'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
+    'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
+    'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
+    'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
+}
+
+
+def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
+    backbone = resnet.__dict__[backbone_name](
+        pretrained=pretrained_backbone,
+        replace_stride_with_dilation=[False, True, True])
+
+    return_layers = {'layer4': 'out'}
+    if aux:
+        return_layers['layer3'] = 'aux'
+    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
+
+    aux_classifier = None
+    if aux:
+        inplanes = 1024
+        aux_classifier = FCNHead(inplanes, num_classes)
+
+    model_map = {
+        'deeplabv3': (DeepLabHead, DeepLabV3),
+        'fcn': (FCNHead, FCN),
+    }
+    inplanes = 2048
+    classifier = model_map[name][0](inplanes, num_classes)
+    base_model = model_map[name][1]
+
+    model = base_model(backbone, classifier, aux_classifier)
+    return model
+
+
+def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
+    if pretrained:
+        aux_loss = True
+    model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
+    if pretrained:
+        arch = arch_type + '_' + backbone + '_coco'
+        model_url = model_urls[arch]
+        if model_url is None:
+            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
+        else:
+            state_dict = load_state_dict_from_url(model_url, progress=progress)
+            model.load_state_dict(state_dict)
+    return model
+
+
+
[docs]def fcn_resnet50(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
+ + +
[docs]def fcn_resnet101(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
+ + +
[docs]def deeplabv3_resnet50(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a ResNet-50 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
+ + +
[docs]def deeplabv3_resnet101(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a ResNet-101 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
+
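+
+# Editor's usage sketch (not part of the original module): the segmentation
+# models return an OrderedDict whose 'out' entry holds per-pixel class scores
+# at the input resolution ('aux' is added when aux_loss is enabled).
+if __name__ == "__main__":
+    import torch
+
+    # pretrained_backbone=False keeps this smoke test from downloading weights.
+    net = fcn_resnet50(pretrained=False, num_classes=21, pretrained_backbone=False)
+    net.eval()
+    with torch.no_grad():
+        result = net(torch.rand(1, 3, 224, 224))
+    print(result['out'].shape)  # torch.Size([1, 21, 224, 224])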
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/shufflenetv2.html b/docs/1.7.0/torchvision/_modules/torchvision/models/shufflenetv2.html new file mode 100644 index 000000000000..d748daba8a02 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/shufflenetv2.html @@ -0,0 +1,760 @@ + + + + + + + + + + + + torchvision.models.shufflenetv2 — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.shufflenetv2

+import torch
+import torch.nn as nn
+from .utils import load_state_dict_from_url
+
+
+__all__ = [
+    'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
+    'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0'
+]
+
+model_urls = {
+    'shufflenetv2_x0.5': 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth',
+    'shufflenetv2_x1.0': 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth',
+    'shufflenetv2_x1.5': None,
+    'shufflenetv2_x2.0': None,
+}
+
+
+def channel_shuffle(x, groups):
+    # type: (torch.Tensor, int) -> torch.Tensor
+    batchsize, num_channels, height, width = x.data.size()
+    channels_per_group = num_channels // groups
+
+    # reshape
+    x = x.view(batchsize, groups,
+               channels_per_group, height, width)
+
+    x = torch.transpose(x, 1, 2).contiguous()
+
+    # flatten
+    x = x.view(batchsize, -1, height, width)
+
+    return x
+
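+# Shape walk-through for channel_shuffle (editor's note): an input of shape
+# (N, 8, H, W) with groups=2 is viewed as (N, 2, 4, H, W), transposed to
+# (N, 4, 2, H, W) and flattened back to (N, 8, H, W), which interleaves the
+# channels of the two groups.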
+
+class InvertedResidual(nn.Module):
+    def __init__(self, inp, oup, stride):
+        super(InvertedResidual, self).__init__()
+
+        if not (1 <= stride <= 3):
+            raise ValueError('illegal stride value')
+        self.stride = stride
+
+        branch_features = oup // 2
+        assert (self.stride != 1) or (inp == branch_features << 1)
+
+        if self.stride > 1:
+            self.branch1 = nn.Sequential(
+                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
+                nn.BatchNorm2d(inp),
+                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
+                nn.BatchNorm2d(branch_features),
+                nn.ReLU(inplace=True),
+            )
+        else:
+            self.branch1 = nn.Sequential()
+
+        self.branch2 = nn.Sequential(
+            nn.Conv2d(inp if (self.stride > 1) else branch_features,
+                      branch_features, kernel_size=1, stride=1, padding=0, bias=False),
+            nn.BatchNorm2d(branch_features),
+            nn.ReLU(inplace=True),
+            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
+            nn.BatchNorm2d(branch_features),
+            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
+            nn.BatchNorm2d(branch_features),
+            nn.ReLU(inplace=True),
+        )
+
+    @staticmethod
+    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
+        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
+
+    def forward(self, x):
+        if self.stride == 1:
+            x1, x2 = x.chunk(2, dim=1)
+            out = torch.cat((x1, self.branch2(x2)), dim=1)
+        else:
+            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
+
+        out = channel_shuffle(out, 2)
+
+        return out
+
+
+class ShuffleNetV2(nn.Module):
+    def __init__(self, stages_repeats, stages_out_channels, num_classes=1000, inverted_residual=InvertedResidual):
+        super(ShuffleNetV2, self).__init__()
+
+        if len(stages_repeats) != 3:
+            raise ValueError('expected stages_repeats as list of 3 positive ints')
+        if len(stages_out_channels) != 5:
+            raise ValueError('expected stages_out_channels as list of 5 positive ints')
+        self._stage_out_channels = stages_out_channels
+
+        input_channels = 3
+        output_channels = self._stage_out_channels[0]
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
+            nn.BatchNorm2d(output_channels),
+            nn.ReLU(inplace=True),
+        )
+        input_channels = output_channels
+
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
+        for name, repeats, output_channels in zip(
+                stage_names, stages_repeats, self._stage_out_channels[1:]):
+            seq = [inverted_residual(input_channels, output_channels, 2)]
+            for i in range(repeats - 1):
+                seq.append(inverted_residual(output_channels, output_channels, 1))
+            setattr(self, name, nn.Sequential(*seq))
+            input_channels = output_channels
+
+        output_channels = self._stage_out_channels[-1]
+        self.conv5 = nn.Sequential(
+            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
+            nn.BatchNorm2d(output_channels),
+            nn.ReLU(inplace=True),
+        )
+
+        self.fc = nn.Linear(output_channels, num_classes)
+
+    def _forward_impl(self, x):
+        # See note [TorchScript super()]
+        x = self.conv1(x)
+        x = self.maxpool(x)
+        x = self.stage2(x)
+        x = self.stage3(x)
+        x = self.stage4(x)
+        x = self.conv5(x)
+        x = x.mean([2, 3])  # globalpool
+        x = self.fc(x)
+        return x
+
+    def forward(self, x):
+        return self._forward_impl(x)
+
+
+def _shufflenetv2(arch, pretrained, progress, *args, **kwargs):
+    model = ShuffleNetV2(*args, **kwargs)
+
+    if pretrained:
+        model_url = model_urls[arch]
+        if model_url is None:
+            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
+        else:
+            state_dict = load_state_dict_from_url(model_url, progress=progress)
+            model.load_state_dict(state_dict)
+
+    return model
+
+
+
[docs]def shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs): + """ + Constructs a ShuffleNetV2 with 0.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress, + [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
+ + +
[docs]def shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs): + """ + Constructs a ShuffleNetV2 with 1.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress, + [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
+ + +
[docs]def shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs): + """ + Constructs a ShuffleNetV2 with 1.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress, + [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
+ + +
[docs]def shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs): + """ + Constructs a ShuffleNetV2 with 2.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress, + [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
+
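+
+# Editor's usage sketch (not part of the original module): the two lists passed
+# above are the per-stage repeat counts and the output channels of conv1,
+# stages 2-4 and conv5, respectively.
+if __name__ == "__main__":
+    import torch
+
+    net = shufflenet_v2_x1_0(pretrained=False)
+    net.eval()
+    with torch.no_grad():
+        out = net(torch.rand(1, 3, 224, 224))
+    print(out.shape)  # torch.Size([1, 1000])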
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/squeezenet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/squeezenet.html new file mode 100644 index 000000000000..f35f37e91117 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/squeezenet.html @@ -0,0 +1,689 @@ + + + + + + + + + + + + torchvision.models.squeezenet — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.squeezenet

+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from .utils import load_state_dict_from_url
+
+__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
+
+model_urls = {
+    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
+    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
+}
+
+
+class Fire(nn.Module):
+
+    def __init__(self, inplanes, squeeze_planes,
+                 expand1x1_planes, expand3x3_planes):
+        super(Fire, self).__init__()
+        self.inplanes = inplanes
+        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
+        self.squeeze_activation = nn.ReLU(inplace=True)
+        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
+                                   kernel_size=1)
+        self.expand1x1_activation = nn.ReLU(inplace=True)
+        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
+                                   kernel_size=3, padding=1)
+        self.expand3x3_activation = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.squeeze_activation(self.squeeze(x))
+        return torch.cat([
+            self.expand1x1_activation(self.expand1x1(x)),
+            self.expand3x3_activation(self.expand3x3(x))
+        ], 1)
+
+
+class SqueezeNet(nn.Module):
+
+    def __init__(self, version='1_0', num_classes=1000):
+        super(SqueezeNet, self).__init__()
+        self.num_classes = num_classes
+        if version == '1_0':
+            self.features = nn.Sequential(
+                nn.Conv2d(3, 96, kernel_size=7, stride=2),
+                nn.ReLU(inplace=True),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(96, 16, 64, 64),
+                Fire(128, 16, 64, 64),
+                Fire(128, 32, 128, 128),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(256, 32, 128, 128),
+                Fire(256, 48, 192, 192),
+                Fire(384, 48, 192, 192),
+                Fire(384, 64, 256, 256),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(512, 64, 256, 256),
+            )
+        elif version == '1_1':
+            self.features = nn.Sequential(
+                nn.Conv2d(3, 64, kernel_size=3, stride=2),
+                nn.ReLU(inplace=True),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(64, 16, 64, 64),
+                Fire(128, 16, 64, 64),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(128, 32, 128, 128),
+                Fire(256, 32, 128, 128),
+                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+                Fire(256, 48, 192, 192),
+                Fire(384, 48, 192, 192),
+                Fire(384, 64, 256, 256),
+                Fire(512, 64, 256, 256),
+            )
+        else:
+            # FIXME: Is this needed? SqueezeNet should only be called from the
+            # FIXME: squeezenet1_x() functions
+            # FIXME: This checking is not done for the other models
+            raise ValueError("Unsupported SqueezeNet version {version}:"
+                             "1_0 or 1_1 expected".format(version=version))
+
+        # Final convolution is initialized differently from the rest
+        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
+        self.classifier = nn.Sequential(
+            nn.Dropout(p=0.5),
+            final_conv,
+            nn.ReLU(inplace=True),
+            nn.AdaptiveAvgPool2d((1, 1))
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                if m is final_conv:
+                    init.normal_(m.weight, mean=0.0, std=0.01)
+                else:
+                    init.kaiming_uniform_(m.weight)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.classifier(x)
+        return torch.flatten(x, 1)
+
+
+def _squeezenet(version, pretrained, progress, **kwargs):
+    model = SqueezeNet(version, **kwargs)
+    if pretrained:
+        arch = 'squeezenet' + version
+        state_dict = load_state_dict_from_url(model_urls[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
+
+
+
[docs]def squeezenet1_0(pretrained=False, progress=True, **kwargs): + r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level + accuracy with 50x fewer parameters and <0.5MB model size" + <https://arxiv.org/abs/1602.07360>`_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _squeezenet('1_0', pretrained, progress, **kwargs)
+ + +
[docs]def squeezenet1_1(pretrained=False, progress=True, **kwargs): + r"""SqueezeNet 1.1 model from the `official SqueezeNet repo + <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_. + SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters + than SqueezeNet 1.0, without sacrificing accuracy. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _squeezenet('1_1', pretrained, progress, **kwargs)
+
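+
+# Editor's usage sketch (not part of the original module): SqueezeNet has no
+# fully connected head; the classifier is a 1x1 convolution followed by global
+# average pooling, so the flattened output is already (N, num_classes).
+if __name__ == "__main__":
+    import torch
+
+    net = squeezenet1_1(pretrained=False)
+    net.eval()
+    with torch.no_grad():
+        out = net(torch.rand(1, 3, 224, 224))
+    print(out.shape)  # torch.Size([1, 1000])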
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/vgg.html b/docs/1.7.0/torchvision/_modules/torchvision/models/vgg.html new file mode 100644 index 000000000000..68503b64d946 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/models/vgg.html @@ -0,0 +1,735 @@ + + + + + + + + + + + + torchvision.models.vgg — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.models.vgg

+import torch
+import torch.nn as nn
+from .utils import load_state_dict_from_url
+
+
+__all__ = [
+    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
+    'vgg19_bn', 'vgg19',
+]
+
+
+model_urls = {
+    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
+    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
+    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
+    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
+    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
+    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
+    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
+    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
+}
+
+
+class VGG(nn.Module):
+
+    def __init__(self, features, num_classes=1000, init_weights=True):
+        super(VGG, self).__init__()
+        self.features = features
+        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
+        self.classifier = nn.Sequential(
+            nn.Linear(512 * 7 * 7, 4096),
+            nn.ReLU(True),
+            nn.Dropout(),
+            nn.Linear(4096, 4096),
+            nn.ReLU(True),
+            nn.Dropout(),
+            nn.Linear(4096, num_classes),
+        )
+        if init_weights:
+            self._initialize_weights()
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.classifier(x)
+        return x
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0, 0.01)
+                nn.init.constant_(m.bias, 0)
+
+
+def make_layers(cfg, batch_norm=False):
+    layers = []
+    in_channels = 3
+    for v in cfg:
+        if v == 'M':
+            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+        else:
+            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+            if batch_norm:
+                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
+            else:
+                layers += [conv2d, nn.ReLU(inplace=True)]
+            in_channels = v
+    return nn.Sequential(*layers)
+
+
+cfgs = {
+    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
+    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
+}
+
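+# Key to the configurations above (editor's note): 'A' -> vgg11/vgg11_bn,
+# 'B' -> vgg13/vgg13_bn, 'D' -> vgg16/vgg16_bn, 'E' -> vgg19/vgg19_bn. 'M'
+# denotes a 2x2 max-pool layer and each integer is the output channel count of
+# a 3x3 convolution.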
+
+def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
+    if pretrained:
+        kwargs['init_weights'] = False
+    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
+
+
+
[docs]def vgg11(pretrained=False, progress=True, **kwargs): + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
+ + +
[docs]def vgg11_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
+ + +
[docs]def vgg13(pretrained=False, progress=True, **kwargs): + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
+ + +
[docs]def vgg13_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
+ + +
[docs]def vgg16(pretrained=False, progress=True, **kwargs): + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
+ + +
[docs]def vgg16_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
+ + +
[docs]def vgg19(pretrained=False, progress=True, **kwargs): + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
+ + +
[docs]def vgg19_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
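+
+# Minimal usage sketch (added note, not part of the original source); the
+# 224x224 input size is an assumption matching the ImageNet pre-training setup,
+# and pretrained=False avoids downloading weights from model_urls.
+#
+#     >>> model = vgg16_bn(pretrained=False)
+#     >>> x = torch.rand(1, 3, 224, 224)
+#     >>> model(x).shape
+#     torch.Size([1, 1000])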
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/models/video/resnet.html b/docs/1.7.0/torchvision/_modules/torchvision/models/video/resnet.html
new file mode 100644
index 000000000000..2d242febb4d6
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/models/video/resnet.html
@@ -0,0 +1,893 @@
+ torchvision.models.video.resnet — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.models.video.resnet

+import torch
+import torch.nn as nn
+
+from ..utils import load_state_dict_from_url
+
+
+__all__ = ['r3d_18', 'mc3_18', 'r2plus1d_18']
+
+model_urls = {
+    'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth',
+    'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth',
+    'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth',
+}
+
+
+class Conv3DSimple(nn.Conv3d):
+    def __init__(self,
+                 in_planes,
+                 out_planes,
+                 midplanes=None,
+                 stride=1,
+                 padding=1):
+
+        super(Conv3DSimple, self).__init__(
+            in_channels=in_planes,
+            out_channels=out_planes,
+            kernel_size=(3, 3, 3),
+            stride=stride,
+            padding=padding,
+            bias=False)
+
+    @staticmethod
+    def get_downsample_stride(stride):
+        return (stride, stride, stride)
+
+
+class Conv2Plus1D(nn.Sequential):
+
+    def __init__(self,
+                 in_planes,
+                 out_planes,
+                 midplanes,
+                 stride=1,
+                 padding=1):
+        super(Conv2Plus1D, self).__init__(
+            nn.Conv3d(in_planes, midplanes, kernel_size=(1, 3, 3),
+                      stride=(1, stride, stride), padding=(0, padding, padding),
+                      bias=False),
+            nn.BatchNorm3d(midplanes),
+            nn.ReLU(inplace=True),
+            nn.Conv3d(midplanes, out_planes, kernel_size=(3, 1, 1),
+                      stride=(stride, 1, 1), padding=(padding, 0, 0),
+                      bias=False))
+
+    @staticmethod
+    def get_downsample_stride(stride):
+        return (stride, stride, stride)
+
+
+class Conv3DNoTemporal(nn.Conv3d):
+
+    def __init__(self,
+                 in_planes,
+                 out_planes,
+                 midplanes=None,
+                 stride=1,
+                 padding=1):
+
+        super(Conv3DNoTemporal, self).__init__(
+            in_channels=in_planes,
+            out_channels=out_planes,
+            kernel_size=(1, 3, 3),
+            stride=(1, stride, stride),
+            padding=(0, padding, padding),
+            bias=False)
+
+    @staticmethod
+    def get_downsample_stride(stride):
+        return (1, stride, stride)
+
+
+class BasicBlock(nn.Module):
+
+    expansion = 1
+
+    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
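+        # Note (added): midplanes is chosen so that the factorized (2+1)D
+        # convolution (1x3x3 spatial followed by 3x1x1 temporal, see Conv2Plus1D)
+        # has roughly the same number of parameters as a full 3x3x3 convolution:
+        # inplanes*midplanes*3*3 + midplanes*planes*3 ~ inplanes*planes*3*3*3.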
+        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
+
+        super(BasicBlock, self).__init__()
+        self.conv1 = nn.Sequential(
+            conv_builder(inplanes, planes, midplanes, stride),
+            nn.BatchNorm3d(planes),
+            nn.ReLU(inplace=True)
+        )
+        self.conv2 = nn.Sequential(
+            conv_builder(planes, planes, midplanes),
+            nn.BatchNorm3d(planes)
+        )
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.conv2(out)
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out += residual
+        out = self.relu(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
+
+        super(Bottleneck, self).__init__()
+        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
+
+        # 1x1x1
+        self.conv1 = nn.Sequential(
+            nn.Conv3d(inplanes, planes, kernel_size=1, bias=False),
+            nn.BatchNorm3d(planes),
+            nn.ReLU(inplace=True)
+        )
+        # Second kernel
+        self.conv2 = nn.Sequential(
+            conv_builder(planes, planes, midplanes, stride),
+            nn.BatchNorm3d(planes),
+            nn.ReLU(inplace=True)
+        )
+
+        # 1x1x1
+        self.conv3 = nn.Sequential(
+            nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
+            nn.BatchNorm3d(planes * self.expansion)
+        )
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.conv2(out)
+        out = self.conv3(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out += residual
+        out = self.relu(out)
+
+        return out
+
+
+class BasicStem(nn.Sequential):
+    """The default conv-batchnorm-relu stem
+    """
+    def __init__(self):
+        super(BasicStem, self).__init__(
+            nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
+                      padding=(1, 3, 3), bias=False),
+            nn.BatchNorm3d(64),
+            nn.ReLU(inplace=True))
+
+
+class R2Plus1dStem(nn.Sequential):
+    """R(2+1)D stem is different than the default one as it uses separated 3D convolution
+    """
+    def __init__(self):
+        super(R2Plus1dStem, self).__init__(
+            nn.Conv3d(3, 45, kernel_size=(1, 7, 7),
+                      stride=(1, 2, 2), padding=(0, 3, 3),
+                      bias=False),
+            nn.BatchNorm3d(45),
+            nn.ReLU(inplace=True),
+            nn.Conv3d(45, 64, kernel_size=(3, 1, 1),
+                      stride=(1, 1, 1), padding=(1, 0, 0),
+                      bias=False),
+            nn.BatchNorm3d(64),
+            nn.ReLU(inplace=True))
+
+
+class VideoResNet(nn.Module):
+
+    def __init__(self, block, conv_makers, layers,
+                 stem, num_classes=400,
+                 zero_init_residual=False):
+        """Generic resnet video generator.
+
+        Args:
+            block (nn.Module): resnet building block
+            conv_makers (List[callable]): convolution builder used for each of the four layers
+            layers (List[int]): number of blocks per layer
+            stem (callable): callable that builds the stem, e.g. BasicStem (conv-bn-relu) or R2Plus1dStem
+            num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
+            zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
+        """
+        super(VideoResNet, self).__init__()
+        self.inplanes = 64
+
+        self.stem = stem()
+
+        self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
+        self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)
+
+        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
+        self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+        # init weights
+        self._initialize_weights()
+
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)
+
+    def forward(self, x):
+        x = self.stem(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        # Flatten the pooled features before the fully connected layer
+        x = x.flatten(1)
+        x = self.fc(x)
+
+        return x
+
+    def _make_layer(self, block, conv_builder, planes, blocks, stride=1):
+        downsample = None
+
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            ds_stride = conv_builder.get_downsample_stride(stride)
+            downsample = nn.Sequential(
+                nn.Conv3d(self.inplanes, planes * block.expansion,
+                          kernel_size=1, stride=ds_stride, bias=False),
+                nn.BatchNorm3d(planes * block.expansion)
+            )
+        layers = []
+        layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))
+
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes, conv_builder))
+
+        return nn.Sequential(*layers)
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv3d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out',
+                                        nonlinearity='relu')
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm3d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0, 0.01)
+                nn.init.constant_(m.bias, 0)
+
+
+def _video_resnet(arch, pretrained=False, progress=True, **kwargs):
+    model = VideoResNet(**kwargs)
+
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
+
+
+
[docs]def r3d_18(pretrained=False, progress=True, **kwargs): + """Construct 18 layer Resnet3D model as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: R3D-18 network + """ + + return _video_resnet('r3d_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv3DSimple] * 4, + layers=[2, 2, 2, 2], + stem=BasicStem, **kwargs)
+ + +
[docs]def mc3_18(pretrained=False, progress=True, **kwargs): + """Constructor for 18 layer Mixed Convolution network as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: MC3 Network definition + """ + return _video_resnet('mc3_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3, + layers=[2, 2, 2, 2], + stem=BasicStem, **kwargs)
+ + +
[docs]def r2plus1d_18(pretrained=False, progress=True, **kwargs): + """Constructor for the 18 layer deep R(2+1)D network as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: R(2+1)D-18 network + """ + return _video_resnet('r2plus1d_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv2Plus1D] * 4, + layers=[2, 2, 2, 2], + stem=R2Plus1dStem, **kwargs)
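+
+# Minimal usage sketch (added note, not part of the original source). The video
+# models take a 5D input of shape (batch, channels, frames, height, width); the
+# 16-frame 112x112 clip below is an assumption matching the Kinetics-400 setup.
+#
+#     >>> model = r3d_18(pretrained=False)
+#     >>> clip = torch.rand(1, 3, 16, 112, 112)
+#     >>> model(clip).shape
+#     torch.Size([1, 400])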
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/boxes.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/boxes.html
new file mode 100644
index 000000000000..3b68c237e12d
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/boxes.html
@@ -0,0 +1,823 @@
+ torchvision.ops.boxes — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.boxes

+import torch
+from torch.jit.annotations import Tuple
+from torch import Tensor
+from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh
+import torchvision
+from torchvision.extension import _assert_has_ops
+
+
+
[docs]def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: + """ + Performs non-maximum suppression (NMS) on the boxes according + to their intersection-over-union (IoU). + + NMS iteratively removes lower scoring boxes which have an + IoU greater than iou_threshold with another (higher scoring) + box. + + If multiple boxes have the exact same score and satisfy the IoU + criterion with respect to a reference box, the selected box is + not guaranteed to be the same between CPU and GPU. This is similar + to the behavior of argsort in PyTorch when repeated values are present. + + Parameters + ---------- + boxes : Tensor[N, 4]) + boxes to perform NMS on. They + are expected to be in (x1, y1, x2, y2) format + scores : Tensor[N] + scores for each one of the boxes + iou_threshold : float + discards all overlapping + boxes with IoU > iou_threshold + + Returns + ------- + keep : Tensor + int64 tensor with the indices + of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + _assert_has_ops() + return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
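+
+# Minimal sketch (added note, not part of the original source): the first two
+# boxes overlap with IoU ~0.68, so only the higher-scoring one is kept together
+# with the disjoint third box; indices are returned in decreasing score order.
+#
+#     >>> boxes = torch.tensor([[0., 0., 10., 10.],
+#     ...                       [1., 1., 11., 11.],
+#     ...                       [20., 20., 30., 30.]])
+#     >>> scores = torch.tensor([0.9, 0.8, 0.7])
+#     >>> nms(boxes, scores, iou_threshold=0.5)
+#     tensor([0, 2])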
+ + +
[docs]@torch.jit._script_if_tracing +def batched_nms( + boxes: Tensor, + scores: Tensor, + idxs: Tensor, + iou_threshold: float, +) -> Tensor: + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Parameters + ---------- + boxes : Tensor[N, 4] + boxes where NMS will be performed. They + are expected to be in (x1, y1, x2, y2) format + scores : Tensor[N] + scores for each one of the boxes + idxs : Tensor[N] + indices of the categories for each one of the boxes. + iou_threshold : float + discards all overlapping boxes + with IoU > iou_threshold + + Returns + ------- + keep : Tensor + int64 tensor with the indices of + the elements that have been kept by NMS, sorted + in decreasing order of scores + """ + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + # strategy: in order to perform NMS independently per class. + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + else: + max_coordinate = boxes.max() + offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) + boxes_for_nms = boxes + offsets[:, None] + keep = nms(boxes_for_nms, scores, iou_threshold) + return keep
+ + +
[docs]def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: + """ + Remove boxes which contains at least one side smaller than min_size. + + Arguments: + boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format + min_size (float): minimum size + + Returns: + keep (Tensor[K]): indices of the boxes that have both sides + larger than min_size + """ + ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] + keep = (ws >= min_size) & (hs >= min_size) + keep = torch.where(keep)[0] + return keep
+ + +
[docs]def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: + """ + Clip boxes so that they lie inside an image of size `size`. + + Arguments: + boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format + size (Tuple[height, width]): size of the image + + Returns: + clipped_boxes (Tensor[N, 4]) + """ + dim = boxes.dim() + boxes_x = boxes[..., 0::2] + boxes_y = boxes[..., 1::2] + height, width = size + + if torchvision._is_tracing(): + boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) + boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device)) + boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) + boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device)) + else: + boxes_x = boxes_x.clamp(min=0, max=width) + boxes_y = boxes_y.clamp(min=0, max=height) + + clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim) + return clipped_boxes.reshape(boxes.shape)
+ + +
[docs]def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: + """ + Converts boxes from given in_fmt to out_fmt. + Supported in_fmt and out_fmt are: + + 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right. + + 'xywh' : boxes are represented via corner, width and height, x1, y2 being top left, w, h being width and height. + + 'cxcywh' : boxes are represented via centre, width and height, cx, cy being center of box, w, h + being width and height. + + Arguments: + boxes (Tensor[N, 4]): boxes which will be converted. + in_fmt (str): Input format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh']. + out_fmt (str): Output format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh'] + + Returns: + boxes (Tensor[N, 4]): Boxes into converted format. + """ + + allowed_fmts = ("xyxy", "xywh", "cxcywh") + if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts: + raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") + + if in_fmt == out_fmt: + return boxes.clone() + + if in_fmt != 'xyxy' and out_fmt != 'xyxy': + # convert to xyxy and change in_fmt xyxy + if in_fmt == "xywh": + boxes = _box_xywh_to_xyxy(boxes) + elif in_fmt == "cxcywh": + boxes = _box_cxcywh_to_xyxy(boxes) + in_fmt = 'xyxy' + + if in_fmt == "xyxy": + if out_fmt == "xywh": + boxes = _box_xyxy_to_xywh(boxes) + elif out_fmt == "cxcywh": + boxes = _box_xyxy_to_cxcywh(boxes) + elif out_fmt == "xyxy": + if in_fmt == "xywh": + boxes = _box_xywh_to_xyxy(boxes) + elif in_fmt == "cxcywh": + boxes = _box_cxcywh_to_xyxy(boxes) + return boxes
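+
+# Minimal sketch (added note, not part of the original source): the same box
+# expressed in the three supported formats.
+#
+#     >>> xyxy = torch.tensor([[10., 20., 30., 60.]])
+#     >>> box_convert(xyxy, in_fmt='xyxy', out_fmt='xywh')
+#     tensor([[10., 20., 20., 40.]])
+#     >>> box_convert(xyxy, in_fmt='xyxy', out_fmt='cxcywh')
+#     tensor([[20., 40., 20., 40.]])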
+ + +
[docs]def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by its + (x1, y1, x2, y2) coordinates. + + Arguments: + boxes (Tensor[N, 4]): boxes for which the area will be computed. They + are expected to be in (x1, y1, x2, y2) format + + Returns: + area (Tensor[N]): area for each box + """ + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+ + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +
[docs]def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + + Arguments: + boxes1 (Tensor[N, 4]) + boxes2 (Tensor[M, 4]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 + """ + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + iou = inter / (area1[:, None] + area2 - inter) + return iou
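+
+# Minimal sketch (added note, not part of the original source): pairwise IoU
+# between a set of N boxes and a set of M boxes gives an [N, M] matrix.
+#
+#     >>> a = torch.tensor([[0., 0., 10., 10.]])
+#     >>> b = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
+#     >>> box_iou(a, b)
+#     tensor([[1.0000, 0.1429]])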
+ + +# Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py +
[docs]def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: + """ + Return generalized intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + + Arguments: + boxes1 (Tensor[N, 4]) + boxes2 (Tensor[M, 4]) + + Returns: + generalized_iou (Tensor[N, M]): the NxM matrix containing the pairwise generalized_IoU values + for every element in boxes1 and boxes2 + """ + + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + + lti = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + areai = whi[:, :, 0] * whi[:, :, 1] + + return iou - (areai - union) / areai
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/deform_conv.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/deform_conv.html
new file mode 100644
index 000000000000..7dca1bb314c4
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/deform_conv.html
@@ -0,0 +1,709 @@
+ torchvision.ops.deform_conv — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.deform_conv

+import math
+
+import torch
+from torch import nn, Tensor
+from torch.nn import init
+from torch.nn.parameter import Parameter
+from torch.nn.modules.utils import _pair
+from torch.jit.annotations import Optional, Tuple
+from torchvision.extension import _assert_has_ops
+
+
+
[docs]def deform_conv2d( + input: Tensor, + offset: Tensor, + weight: Tensor, + bias: Optional[Tensor] = None, + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + dilation: Tuple[int, int] = (1, 1), +) -> Tensor: + """ + Performs Deformable Convolution, described in Deformable Convolutional Networks + + Arguments: + input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor + offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, + out_height, out_width]): offsets to be applied for each position in the + convolution kernel. + weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]): + convolution weights, split into groups of size (in_channels // groups) + bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None + stride (int or Tuple[int, int]): distance between convolution centers. Default: 1 + padding (int or Tuple[int, int]): height/width of padding of zeroes around + each image. Default: 0 + dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1 + + Returns: + output (Tensor[batch_sz, out_channels, out_h, out_w]): result of convolution + + + Examples:: + >>> input = torch.rand(4, 3, 10, 10) + >>> kh, kw = 3, 3 + >>> weight = torch.rand(5, 3, kh, kw) + >>> # offset should have the same spatial size as the output + >>> # of the convolution. In this case, for an input of 10, stride of 1 + >>> # and kernel size of 3, without padding, the output size is 8 + >>> offset = torch.rand(4, 2 * kh * kw, 8, 8) + >>> out = deform_conv2d(input, offset, weight) + >>> print(out.shape) + >>> # returns + >>> torch.Size([4, 5, 8, 8]) + """ + + _assert_has_ops() + out_channels = weight.shape[0] + if bias is None: + bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype) + + stride_h, stride_w = _pair(stride) + pad_h, pad_w = _pair(padding) + dil_h, dil_w = _pair(dilation) + weights_h, weights_w = weight.shape[-2:] + _, n_in_channels, in_h, in_w = input.shape + + n_offset_grps = offset.shape[1] // (2 * weights_h * weights_w) + n_weight_grps = n_in_channels // weight.shape[1] + + if n_offset_grps == 0: + raise RuntimeError( + "the shape of the offset tensor at dimension 1 is not valid. It should " + "be a multiple of 2 * weight.size[2] * weight.size[3].\n" + "Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format( + offset.shape[1], 2 * weights_h * weights_w)) + + return torch.ops.torchvision.deform_conv2d( + input, + weight, + offset, + bias, + stride_h, stride_w, + pad_h, pad_w, + dil_h, dil_w, + n_weight_grps, + n_offset_grps)
+ + +
[docs]class DeformConv2d(nn.Module): + """ + See deform_conv2d + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + dilation: int = 1, + groups: int = 1, + bias: bool = True, + ): + super(DeformConv2d, self).__init__() + + if in_channels % groups != 0: + raise ValueError('in_channels must be divisible by groups') + if out_channels % groups != 0: + raise ValueError('out_channels must be divisible by groups') + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + + self.weight = Parameter(torch.empty(out_channels, in_channels // groups, + self.kernel_size[0], self.kernel_size[1])) + + if bias: + self.bias = Parameter(torch.empty(out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self) -> None: + init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + init.uniform_(self.bias, -bound, bound) + + def forward(self, input: Tensor, offset: Tensor) -> Tensor: + """ + Arguments: + input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor + offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, + out_height, out_width]): offsets to be applied for each position in the + convolution kernel. + """ + return deform_conv2d(input, offset, self.weight, self.bias, stride=self.stride, + padding=self.padding, dilation=self.dilation) + + def __repr__(self) -> str: + s = self.__class__.__name__ + '(' + s += '{in_channels}' + s += ', {out_channels}' + s += ', kernel_size={kernel_size}' + s += ', stride={stride}' + s += ', padding={padding}' if self.padding != (0, 0) else '' + s += ', dilation={dilation}' if self.dilation != (1, 1) else '' + s += ', groups={groups}' if self.groups != 1 else '' + s += ', bias=False' if self.bias is None else '' + s += ')' + return s.format(**self.__dict__)
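+
+# Minimal sketch of the module interface (added note, not part of the original
+# source; requires the compiled torchvision ops). The offset tensor needs
+# 2 * offset_groups * kh * kw channels and the spatial size of the convolution
+# output, here 8x8 for a 10x10 input with a 3x3 kernel and no padding.
+#
+#     >>> conv = DeformConv2d(3, 5, kernel_size=3)
+#     >>> x = torch.rand(4, 3, 10, 10)
+#     >>> offset = torch.rand(4, 2 * 3 * 3, 8, 8)
+#     >>> conv(x, offset).shape
+#     torch.Size([4, 5, 8, 8])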
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/feature_pyramid_network.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/feature_pyramid_network.html
new file mode 100644
index 000000000000..86f3d855bc72
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/feature_pyramid_network.html
@@ -0,0 +1,761 @@
+ torchvision.ops.feature_pyramid_network — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.feature_pyramid_network

+from collections import OrderedDict
+
+import torch
+import torch.nn.functional as F
+from torch import nn, Tensor
+
+from torch.jit.annotations import Tuple, List, Dict, Optional
+
+
+class ExtraFPNBlock(nn.Module):
+    """
+    Base class for the extra block in the FPN.
+
+    Arguments:
+        results (List[Tensor]): the result of the FPN
+        x (List[Tensor]): the original feature maps
+        names (List[str]): the names for each one of the
+            original feature maps
+
+    Returns:
+        results (List[Tensor]): the extended set of results
+            of the FPN
+        names (List[str]): the extended set of names for the results
+    """
+    def forward(
+        self,
+        results: List[Tensor],
+        x: List[Tensor],
+        names: List[str],
+    ) -> Tuple[List[Tensor], List[str]]:
+        pass
+
+
+
[docs]class FeaturePyramidNetwork(nn.Module): + """ + Module that adds a FPN from on top of a set of feature maps. This is based on + `"Feature Pyramid Network for Object Detection" <https://arxiv.org/abs/1612.03144>`_. + + The feature maps are currently supposed to be in increasing depth + order. + + The input to the model is expected to be an OrderedDict[Tensor], containing + the feature maps on top of which the FPN will be added. + + Arguments: + in_channels_list (list[int]): number of channels for each feature map that + is passed to the module + out_channels (int): number of channels of the FPN representation + extra_blocks (ExtraFPNBlock or None): if provided, extra operations will + be performed. It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names + + Examples:: + + >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5) + >>> # get some dummy data + >>> x = OrderedDict() + >>> x['feat0'] = torch.rand(1, 10, 64, 64) + >>> x['feat2'] = torch.rand(1, 20, 16, 16) + >>> x['feat3'] = torch.rand(1, 30, 8, 8) + >>> # compute the FPN on top of x + >>> output = m(x) + >>> print([(k, v.shape) for k, v in output.items()]) + >>> # returns + >>> [('feat0', torch.Size([1, 5, 64, 64])), + >>> ('feat2', torch.Size([1, 5, 16, 16])), + >>> ('feat3', torch.Size([1, 5, 8, 8]))] + + """ + def __init__( + self, + in_channels_list: List[int], + out_channels: int, + extra_blocks: Optional[ExtraFPNBlock] = None, + ): + super(FeaturePyramidNetwork, self).__init__() + self.inner_blocks = nn.ModuleList() + self.layer_blocks = nn.ModuleList() + for in_channels in in_channels_list: + if in_channels == 0: + raise ValueError("in_channels=0 is currently not supported") + inner_block_module = nn.Conv2d(in_channels, out_channels, 1) + layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1) + self.inner_blocks.append(inner_block_module) + self.layer_blocks.append(layer_block_module) + + # initialize parameters now to avoid modifying the initialization of top_blocks + for m in self.children(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + if extra_blocks is not None: + assert isinstance(extra_blocks, ExtraFPNBlock) + self.extra_blocks = extra_blocks + + def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.inner_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = 0 + for m in self.inner_blocks: + num_blocks += 1 + if idx < 0: + idx += num_blocks + i = 0 + out = x + for module in self.inner_blocks: + if i == idx: + out = module(x) + i += 1 + return out + + def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.layer_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = 0 + for m in self.layer_blocks: + num_blocks += 1 + if idx < 0: + idx += num_blocks + i = 0 + out = x + for module in self.layer_blocks: + if i == idx: + out = module(x) + i += 1 + return out + + def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]: + """ + Computes the FPN for a set of feature maps. + + Arguments: + x (OrderedDict[Tensor]): feature maps for each feature level. + + Returns: + results (OrderedDict[Tensor]): feature maps after FPN layers. + They are ordered from highest resolution first. 
+ """ + # unpack OrderedDict into two lists for easier handling + names = list(x.keys()) + x = list(x.values()) + + last_inner = self.get_result_from_inner_blocks(x[-1], -1) + results = [] + results.append(self.get_result_from_layer_blocks(last_inner, -1)) + + for idx in range(len(x) - 2, -1, -1): + inner_lateral = self.get_result_from_inner_blocks(x[idx], idx) + feat_shape = inner_lateral.shape[-2:] + inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest") + last_inner = inner_lateral + inner_top_down + results.insert(0, self.get_result_from_layer_blocks(last_inner, idx)) + + if self.extra_blocks is not None: + results, names = self.extra_blocks(results, x, names) + + # make it back an OrderedDict + out = OrderedDict([(k, v) for k, v in zip(names, results)]) + + return out
+ + +class LastLevelMaxPool(ExtraFPNBlock): + """ + Applies a max_pool2d on top of the last feature map + """ + def forward( + self, + x: List[Tensor], + y: List[Tensor], + names: List[str], + ) -> Tuple[List[Tensor], List[str]]: + names.append("pool") + x.append(F.max_pool2d(x[-1], 1, 2, 0)) + return x, names + + +class LastLevelP6P7(ExtraFPNBlock): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7. + """ + def __init__(self, in_channels: int, out_channels: int): + super(LastLevelP6P7, self).__init__() + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + nn.init.kaiming_uniform_(module.weight, a=1) + nn.init.constant_(module.bias, 0) + self.use_P5 = in_channels == out_channels + + def forward( + self, + p: List[Tensor], + c: List[Tensor], + names: List[str], + ) -> Tuple[List[Tensor], List[str]]: + p5, c5 = p[-1], c[-1] + x = p5 if self.use_P5 else c5 + p6 = self.p6(x) + p7 = self.p7(F.relu(p6)) + p.extend([p6, p7]) + names.extend(["p6", "p7"]) + return p, names +
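+
+# Minimal sketch (added note, not part of the original source): passing a
+# LastLevelMaxPool as extra_blocks appends one extra, coarser "pool" level on
+# top of the regular FPN outputs.
+#
+#     >>> m = FeaturePyramidNetwork([10, 20, 30], 5, extra_blocks=LastLevelMaxPool())
+#     >>> x = OrderedDict()
+#     >>> x['feat0'] = torch.rand(1, 10, 64, 64)
+#     >>> x['feat1'] = torch.rand(1, 20, 32, 32)
+#     >>> x['feat2'] = torch.rand(1, 30, 16, 16)
+#     >>> [(k, v.shape) for k, v in m(x).items()]
+#     [('feat0', torch.Size([1, 5, 64, 64])), ('feat1', torch.Size([1, 5, 32, 32])),
+#      ('feat2', torch.Size([1, 5, 16, 16])), ('pool', torch.Size([1, 5, 8, 8]))]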
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/poolers.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/poolers.html
new file mode 100644
index 000000000000..142f7827868e
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/poolers.html
@@ -0,0 +1,812 @@
+ torchvision.ops.poolers — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.poolers

+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+from typing import Union
+
+import torch
+import torch.nn.functional as F
+from torch import nn, Tensor
+
+from torchvision.ops import roi_align
+from torchvision.ops.boxes import box_area
+
+from torch.jit.annotations import Optional, List, Dict, Tuple
+import torchvision
+
+
+# copying result_idx_in_level to a specific index in result[]
+# is not supported by ONNX tracing yet.
+# _onnx_merge_levels() is an implementation supported by ONNX
+# that merges the levels to the right indices
+@torch.jit.unused
+def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor:
+    first_result = unmerged_results[0]
+    dtype, device = first_result.dtype, first_result.device
+    res = torch.zeros((levels.size(0), first_result.size(1),
+                       first_result.size(2), first_result.size(3)),
+                      dtype=dtype, device=device)
+    for level in range(len(unmerged_results)):
+        index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
+        index = index.expand(index.size(0),
+                             unmerged_results[level].size(1),
+                             unmerged_results[level].size(2),
+                             unmerged_results[level].size(3))
+        res = res.scatter(0, index, unmerged_results[level])
+    return res
+
+
+# TODO: (eellison) T54974082 https://github.com/pytorch/pytorch/issues/26744
+def initLevelMapper(
+    k_min: int,
+    k_max: int,
+    canonical_scale: int = 224,
+    canonical_level: int = 4,
+    eps: float = 1e-6,
+):
+    return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)
+
+
+class LevelMapper(object):
+    """Determine which FPN level each RoI in a set of RoIs should map to based
+    on the heuristic in the FPN paper.
+
+    Arguments:
+        k_min (int)
+        k_max (int)
+        canonical_scale (int)
+        canonical_level (int)
+        eps (float)
+    """
+
+    def __init__(
+        self,
+        k_min: int,
+        k_max: int,
+        canonical_scale: int = 224,
+        canonical_level: int = 4,
+        eps: float = 1e-6,
+    ):
+        self.k_min = k_min
+        self.k_max = k_max
+        self.s0 = canonical_scale
+        self.lvl0 = canonical_level
+        self.eps = eps
+
+    def __call__(self, boxlists: List[Tensor]) -> Tensor:
+        """
+        Arguments:
+            boxlists (list[BoxList])
+        """
+        # Compute level ids
+        s = torch.sqrt(torch.cat([box_area(boxlist) for boxlist in boxlists]))
+
+        # Eqn.(1) in FPN paper
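+        # Worked example (added note, not in the original source): with the
+        # defaults s0=224 and lvl0=4, a RoI of area 112*112 gives
+        # floor(4 + log2(112/224)) = 3, i.e. one level below the canonical one,
+        # before the clamping to [k_min, k_max] and the shift by -k_min below.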
+        target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0) + torch.tensor(self.eps, dtype=s.dtype))
+        target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
+        return (target_lvls.to(torch.int64) - self.k_min).to(torch.int64)
+
+
+
[docs]class MultiScaleRoIAlign(nn.Module): + """ + Multi-scale RoIAlign pooling, which is useful for detection with or without FPN. + + It infers the scale of the pooling via the heuristics present in the FPN paper. + + Arguments: + featmap_names (List[str]): the names of the feature maps that will be used + for the pooling. + output_size (List[Tuple[int, int]] or List[int]): output size for the pooled region + sampling_ratio (int): sampling ratio for ROIAlign + + Examples:: + + >>> m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat3'], 3, 2) + >>> i = OrderedDict() + >>> i['feat1'] = torch.rand(1, 5, 64, 64) + >>> i['feat2'] = torch.rand(1, 5, 32, 32) # this feature won't be used in the pooling + >>> i['feat3'] = torch.rand(1, 5, 16, 16) + >>> # create some random bounding boxes + >>> boxes = torch.rand(6, 4) * 256; boxes[:, 2:] += boxes[:, :2] + >>> # original image size, before computing the feature maps + >>> image_sizes = [(512, 512)] + >>> output = m(i, [boxes], image_sizes) + >>> print(output.shape) + >>> torch.Size([6, 5, 3, 3]) + + """ + + __annotations__ = { + 'scales': Optional[List[float]], + 'map_levels': Optional[LevelMapper] + } + + def __init__( + self, + featmap_names: List[str], + output_size: Union[int, Tuple[int], List[int]], + sampling_ratio: int, + ): + super(MultiScaleRoIAlign, self).__init__() + if isinstance(output_size, int): + output_size = (output_size, output_size) + self.featmap_names = featmap_names + self.sampling_ratio = sampling_ratio + self.output_size = tuple(output_size) + self.scales = None + self.map_levels = None + + def convert_to_roi_format(self, boxes: List[Tensor]) -> Tensor: + concat_boxes = torch.cat(boxes, dim=0) + device, dtype = concat_boxes.device, concat_boxes.dtype + ids = torch.cat( + [ + torch.full_like(b[:, :1], i, dtype=dtype, layout=torch.strided, device=device) + for i, b in enumerate(boxes) + ], + dim=0, + ) + rois = torch.cat([ids, concat_boxes], dim=1) + return rois + + def infer_scale(self, feature: Tensor, original_size: List[int]) -> float: + # assumption: the scale is of the form 2 ** (-k), with k integer + size = feature.shape[-2:] + possible_scales = torch.jit.annotate(List[float], []) + for s1, s2 in zip(size, original_size): + approx_scale = float(s1) / float(s2) + scale = 2 ** float(torch.tensor(approx_scale).log2().round()) + possible_scales.append(scale) + assert possible_scales[0] == possible_scales[1] + return possible_scales[0] + + def setup_scales( + self, + features: List[Tensor], + image_shapes: List[Tuple[int, int]], + ) -> None: + assert len(image_shapes) != 0 + max_x = 0 + max_y = 0 + for shape in image_shapes: + max_x = max(shape[0], max_x) + max_y = max(shape[1], max_y) + original_input_shape = (max_x, max_y) + + scales = [self.infer_scale(feat, original_input_shape) for feat in features] + # get the levels in the feature map by leveraging the fact that the network always + # downsamples by a factor of 2 at each level. + lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item() + lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item() + self.scales = scales + self.map_levels = initLevelMapper(int(lvl_min), int(lvl_max)) + + def forward( + self, + x: Dict[str, Tensor], + boxes: List[Tensor], + image_shapes: List[Tuple[int, int]], + ) -> Tensor: + """ + Arguments: + x (OrderedDict[Tensor]): feature maps for each level. They are assumed to have + all the same number of channels, but they can have different sizes. 
+ boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in + (x1, y1, x2, y2) format and in the image reference size, not the feature map + reference. + image_shapes (List[Tuple[height, width]]): the sizes of each image before they + have been fed to a CNN to obtain feature maps. This allows us to infer the + scale factor for each one of the levels to be pooled. + Returns: + result (Tensor) + """ + x_filtered = [] + for k, v in x.items(): + if k in self.featmap_names: + x_filtered.append(v) + num_levels = len(x_filtered) + rois = self.convert_to_roi_format(boxes) + if self.scales is None: + self.setup_scales(x_filtered, image_shapes) + + scales = self.scales + assert scales is not None + + if num_levels == 1: + return roi_align( + x_filtered[0], rois, + output_size=self.output_size, + spatial_scale=scales[0], + sampling_ratio=self.sampling_ratio + ) + + mapper = self.map_levels + assert mapper is not None + + levels = mapper(boxes) + + num_rois = len(rois) + num_channels = x_filtered[0].shape[1] + + dtype, device = x_filtered[0].dtype, x_filtered[0].device + result = torch.zeros( + (num_rois, num_channels,) + self.output_size, + dtype=dtype, + device=device, + ) + + tracing_results = [] + for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)): + idx_in_level = torch.where(levels == level)[0] + rois_per_level = rois[idx_in_level] + + result_idx_in_level = roi_align( + per_level_feature, rois_per_level, + output_size=self.output_size, + spatial_scale=scale, sampling_ratio=self.sampling_ratio) + + if torchvision._is_tracing(): + tracing_results.append(result_idx_in_level.to(dtype)) + else: + # result and result_idx_in_level's dtypes are based on dtypes of different + # elements in x_filtered. x_filtered contains tensors output by different + # layers. When autocast is active, it may choose different dtypes for + # different layers' outputs. Therefore, we defensively match result's dtype + # before copying elements from result_idx_in_level in the following op. + # We need to cast manually (can't rely on autocast to cast for us) because + # the op acts on result in-place, and autocast only affects out-of-place ops. + result[idx_in_level] = result_idx_in_level.to(result.dtype) + + if torchvision._is_tracing(): + result = _onnx_merge_levels(levels, tracing_results) + + return result
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_align.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_align.html
new file mode 100644
index 000000000000..f0e1da153a12
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_align.html
@@ -0,0 +1,632 @@
+ torchvision.ops.ps_roi_align — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.ps_roi_align

+import torch
+from torch import nn, Tensor
+
+from torch.nn.modules.utils import _pair
+from torch.jit.annotations import List, Tuple
+
+from torchvision.extension import _assert_has_ops
+from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
+
+
+
[docs]def ps_roi_align( + input: Tensor, + boxes: Tensor, + output_size: int, + spatial_scale: float = 1.0, + sampling_ratio: int = -1, +) -> Tensor: + """ + Performs Position-Sensitive Region of Interest (RoI) Align operator + mentioned in Light-Head R-CNN. + + Arguments: + input (Tensor[N, C, H, W]): input tensor + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. If a single Tensor is passed, + then the first column should contain the batch index. If a list of Tensors + is passed, then each Tensor will correspond to the boxes for an element i + in a batch + output_size (int or Tuple[int, int]): the size of the output after the cropping + is performed, as (height, width) + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + sampling_ratio (int): number of sampling points in the interpolation grid + used to compute the output value of each pooled output bin. If > 0 + then exactly sampling_ratio x sampling_ratio grid points are used. + If <= 0, then an adaptive number of grid points are used (computed as + ceil(roi_width / pooled_w), and likewise for height). Default: -1 + + Returns: + output (Tensor[K, C, output_size[0], output_size[1]]) + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.ps_roi_align(input, rois, spatial_scale, + output_size[0], + output_size[1], + sampling_ratio) + return output
+ + +
[docs]class PSRoIAlign(nn.Module): + """ + See ps_roi_align + """ + def __init__( + self, + output_size: int, + spatial_scale: float, + sampling_ratio: int, + ): + super(PSRoIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return ps_roi_align(input, rois, self.output_size, self.spatial_scale, + self.sampling_ratio) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ', sampling_ratio=' + str(self.sampling_ratio) + tmpstr += ')' + return tmpstr
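+
+# Minimal sketch (added note, not part of the original source; requires the
+# compiled torchvision ops). Position-sensitive pooling splits the input
+# channels across the output bins, so the channel count is assumed to be a
+# multiple of output_size[0] * output_size[1]; here 18 = 2 * 3 * 3 channels are
+# pooled into a 3x3 grid, leaving C / (output_size[0] * output_size[1]) = 2
+# output channels per RoI.
+#
+#     >>> feat = torch.rand(1, 18, 32, 32)
+#     >>> rois = torch.tensor([[0., 0., 0., 15., 15.]])  # (batch_idx, x1, y1, x2, y2)
+#     >>> ps_roi_align(feat, rois, output_size=3).shape
+#     torch.Size([1, 2, 3, 3])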
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_pool.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_pool.html
new file mode 100644
index 000000000000..467a5f9a4d01
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/ps_roi_pool.html
@@ -0,0 +1,617 @@
+ torchvision.ops.ps_roi_pool — Torchvision 0.8.0.dev20201012 documentation

Source code for torchvision.ops.ps_roi_pool

+import torch
+from torch import nn, Tensor
+
+from torch.nn.modules.utils import _pair
+from torch.jit.annotations import List, Tuple
+
+from torchvision.extension import _assert_has_ops
+from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
+
+
+
[docs]def ps_roi_pool( + input: Tensor, + boxes: Tensor, + output_size: int, + spatial_scale: float = 1.0, +) -> Tensor: + """ + Performs Position-Sensitive Region of Interest (RoI) Pool operator + described in R-FCN + + Arguments: + input (Tensor[N, C, H, W]): input tensor + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. If a single Tensor is passed, + then the first column should contain the batch index. If a list of Tensors + is passed, then each Tensor will correspond to the boxes for an element i + in a batch + output_size (int or Tuple[int, int]): the size of the output after the cropping + is performed, as (height, width) + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + + Returns: + output (Tensor[K, C, output_size[0], output_size[1]]) + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale, + output_size[0], + output_size[1]) + return output
+ + +
[docs]class PSRoIPool(nn.Module): + """ + See ps_roi_pool + """ + def __init__(self, output_size: int, spatial_scale: float): + super(PSRoIPool, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return ps_roi_pool(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ')' + return tmpstr
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2017, Torch Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_align.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_align.html new file mode 100644 index 000000000000..3b61f64ad930 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_align.html @@ -0,0 +1,635 @@ + + + + + + + + + + + + torchvision.ops.roi_align — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.ops.roi_align

+import torch
+from torch import nn, Tensor
+
+from torch.nn.modules.utils import _pair
+from torch.jit.annotations import List, BroadcastingList2
+
+from torchvision.extension import _assert_has_ops
+from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
+
+
+
[docs]def roi_align( + input: Tensor, + boxes: Tensor, + output_size: BroadcastingList2[int], + spatial_scale: float = 1.0, + sampling_ratio: int = -1, + aligned: bool = False, +) -> Tensor: + """ + Performs Region of Interest (RoI) Align operator described in Mask R-CNN + + Arguments: + input (Tensor[N, C, H, W]): input tensor + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. If a single Tensor is passed, + then the first column should contain the batch index. If a list of Tensors + is passed, then each Tensor will correspond to the boxes for an element i + in a batch + output_size (int or Tuple[int, int]): the size of the output after the cropping + is performed, as (height, width) + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + sampling_ratio (int): number of sampling points in the interpolation grid + used to compute the output value of each pooled output bin. If > 0, + then exactly sampling_ratio x sampling_ratio grid points are used. If + <= 0, then an adaptive number of grid points are used (computed as + ceil(roi_width / pooled_w), and likewise for height). Default: -1 + aligned (bool): If False, use the legacy implementation. + If True, pixel shift it by -0.5 for align more perfectly about two neighboring pixel indices. + This version in Detectron2 + + Returns: + output (Tensor[K, C, output_size[0], output_size[1]]) + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + return torch.ops.torchvision.roi_align(input, rois, spatial_scale, + output_size[0], output_size[1], + sampling_ratio, aligned)
+ + +
[docs]class RoIAlign(nn.Module): + """ + See roi_align + """ + def __init__( + self, + output_size: BroadcastingList2[int], + spatial_scale: float, + sampling_ratio: int, + aligned: bool = False, + ): + super(RoIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ', sampling_ratio=' + str(self.sampling_ratio) + tmpstr += ', aligned=' + str(self.aligned) + tmpstr += ')' + return tmpstr
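A usage sketch for roi_align / RoIAlign with a list of per-image boxes; the shapes are illustrative, and spatial_scale=0.25 assumes the feature map is at 1/4 of the input resolution.

import torch
from torchvision.ops import roi_align

features = torch.rand(2, 256, 56, 56)           # batch of two feature maps
boxes = [
    torch.tensor([[0.0, 0.0, 112.0, 112.0]]),   # boxes for image 0, (x1, y1, x2, y2)
    torch.tensor([[16.0, 16.0, 200.0, 160.0]]), # boxes for image 1
]
pooled = roi_align(features, boxes, output_size=(7, 7),
                   spatial_scale=0.25, sampling_ratio=2, aligned=True)
print(pooled.shape)  # torch.Size([2, 256, 7, 7]), one row per box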
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_pool.html b/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_pool.html new file mode 100644 index 000000000000..6cf0c52027bb --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/ops/roi_pool.html @@ -0,0 +1,615 @@ + + + + + + + + + + + + torchvision.ops.roi_pool — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.ops.roi_pool

+import torch
+from torch import nn, Tensor
+
+from torch.nn.modules.utils import _pair
+from torch.jit.annotations import List, BroadcastingList2
+
+from torchvision.extension import _assert_has_ops
+from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
+
+
+
[docs]def roi_pool( + input: Tensor, + boxes: Tensor, + output_size: BroadcastingList2[int], + spatial_scale: float = 1.0, +) -> Tensor: + """ + Performs Region of Interest (RoI) Pool operator described in Fast R-CNN + + Arguments: + input (Tensor[N, C, H, W]): input tensor + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. If a single Tensor is passed, + then the first column should contain the batch index. If a list of Tensors + is passed, then each Tensor will correspond to the boxes for an element i + in a batch + output_size (int or Tuple[int, int]): the size of the output after the cropping + is performed, as (height, width) + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + + Returns: + output (Tensor[K, C, output_size[0], output_size[1]]) + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, + output_size[0], output_size[1]) + return output
+ + +
[docs]class RoIPool(nn.Module): + """ + See roi_pool + """ + def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float): + super(RoIPool, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return roi_pool(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ')' + return tmpstr
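A usage sketch for roi_pool / RoIPool with a single Tensor of boxes whose first column is the batch index (shapes are illustrative).

import torch
from torchvision.ops import roi_pool

features = torch.rand(1, 64, 32, 32)
rois = torch.tensor([[0.0, 2.0, 2.0, 30.0, 30.0]])  # [batch_index, x1, y1, x2, y2]
out = roi_pool(features, rois, output_size=(5, 5), spatial_scale=1.0)
print(out.shape)  # torch.Size([1, 64, 5, 5])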
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/transforms/functional.html b/docs/1.7.0/torchvision/_modules/torchvision/transforms/functional.html new file mode 100644 index 000000000000..81bb72b6b32e --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/transforms/functional.html @@ -0,0 +1,1602 @@ + + + + + + + + + + + + torchvision.transforms.functional — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.transforms.functional

+import math
+import numbers
+import warnings
+from typing import Any, Optional
+
+import numpy as np
+from PIL import Image
+
+import torch
+from torch import Tensor
+from torch.jit.annotations import List, Tuple
+
+try:
+    import accimage
+except ImportError:
+    accimage = None
+
+from . import functional_pil as F_pil
+from . import functional_tensor as F_t
+
+
+_is_pil_image = F_pil._is_pil_image
+_parse_fill = F_pil._parse_fill
+
+
+def _get_image_size(img: Tensor) -> List[int]:
+    """Returns the image size as (w, h)
+    """
+    if isinstance(img, torch.Tensor):
+        return F_t._get_image_size(img)
+
+    return F_pil._get_image_size(img)
+
+
+def _get_image_num_channels(img: Tensor) -> int:
+    if isinstance(img, torch.Tensor):
+        return F_t._get_image_num_channels(img)
+
+    return F_pil._get_image_num_channels(img)
+
+
+@torch.jit.unused
+def _is_numpy(img: Any) -> bool:
+    return isinstance(img, np.ndarray)
+
+
+@torch.jit.unused
+def _is_numpy_image(img: Any) -> bool:
+    return img.ndim in {2, 3}
+
+
+
[docs]def to_tensor(pic): + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + + See ``ToTensor`` for more details. + + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not(F_pil._is_pil_image(pic) or _is_numpy(pic)): + raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic))) + + if _is_numpy(pic) and not _is_numpy_image(pic): + raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim)) + + if isinstance(pic, np.ndarray): + # handle numpy array + if pic.ndim == 2: + pic = pic[:, :, None] + + img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous() + # backward compatibility + if isinstance(img, torch.ByteTensor): + return img.float().div(255) + else: + return img + + if accimage is not None and isinstance(pic, accimage.Image): + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32) + pic.copyto(nppic) + return torch.from_numpy(nppic) + + # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + elif pic.mode == 'F': + img = torch.from_numpy(np.array(pic, np.float32, copy=False)) + elif pic.mode == '1': + img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False)) + else: + img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) + + img = img.view(pic.size[1], pic.size[0], len(pic.getbands())) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float().div(255) + else: + return img
+ + +
[docs]def pil_to_tensor(pic): + """Convert a ``PIL Image`` to a tensor of the same type. + + See :class:`~torchvision.transforms.PILToTensor` for more details. + + Args: + pic (PIL Image): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not F_pil._is_pil_image(pic): + raise TypeError('pic should be PIL Image. Got {}'.format(type(pic))) + + if accimage is not None and isinstance(pic, accimage.Image): + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32) + pic.copyto(nppic) + return torch.as_tensor(nppic) + + # handle PIL Image + img = torch.as_tensor(np.asarray(pic)) + img = img.view(pic.size[1], pic.size[0], len(pic.getbands())) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)) + return img
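A short sketch contrasting to_tensor and pil_to_tensor (the array contents are arbitrary): to_tensor rescales uint8 data into [0, 1] floats, while pil_to_tensor keeps the original dtype and value range.

import numpy as np
from PIL import Image
import torchvision.transforms.functional as F

img = Image.fromarray(np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8))

scaled = F.to_tensor(img)          # float32, CHW, values in [0.0, 1.0]
raw = F.pil_to_tensor(img)         # uint8, CHW, values kept as 0..255
print(scaled.dtype, scaled.shape)  # torch.float32 torch.Size([3, 32, 48])
print(raw.dtype, raw.shape)        # torch.uint8 torch.Size([3, 32, 48])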
+ + +
[docs]def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + """Convert a tensor image to the given ``dtype`` and scale the values accordingly + + Args: + image (torch.Tensor): Image to be converted + dtype (torch.dtype): Desired data type of the output + + Returns: + (torch.Tensor): Converted image + + .. note:: + + When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly. + If converted back and forth, this mismatch has no effect. + + Raises: + RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as + well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to + overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range + of the integer ``dtype``. + """ + if not isinstance(image, torch.Tensor): + raise TypeError('Input img should be Tensor Image') + + return F_t.convert_image_dtype(image, dtype)
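A sketch of convert_image_dtype round-tripping a uint8 image through float32; the tensor contents are illustrative.

import torch
import torchvision.transforms.functional as F

img_u8 = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
img_f = F.convert_image_dtype(img_u8, torch.float32)   # rescaled into [0.0, 1.0]
img_back = F.convert_image_dtype(img_f, torch.uint8)   # rescaled back to 0..255
print(img_f.max().item() <= 1.0, img_back.dtype)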
+ + +
[docs]def to_pil_image(pic, mode=None): + """Convert a tensor or an ndarray to PIL Image. + + See :class:`~torchvision.transforms.ToPILImage` for more details. + + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. + mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + + .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + + Returns: + PIL Image: Image converted to PIL Image. + """ + if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): + raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic))) + + elif isinstance(pic, torch.Tensor): + if pic.ndimension() not in {2, 3}: + raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension())) + + elif pic.ndimension() == 2: + # if 2D image, add channel dimension (CHW) + pic = pic.unsqueeze(0) + + elif isinstance(pic, np.ndarray): + if pic.ndim not in {2, 3}: + raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim)) + + elif pic.ndim == 2: + # if 2D image, add channel dimension (HWC) + pic = np.expand_dims(pic, 2) + + npimg = pic + if isinstance(pic, torch.Tensor): + if pic.is_floating_point() and mode != 'F': + pic = pic.mul(255).byte() + npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0)) + + if not isinstance(npimg, np.ndarray): + raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' + + 'not {}'.format(type(npimg))) + + if npimg.shape[2] == 1: + expected_mode = None + npimg = npimg[:, :, 0] + if npimg.dtype == np.uint8: + expected_mode = 'L' + elif npimg.dtype == np.int16: + expected_mode = 'I;16' + elif npimg.dtype == np.int32: + expected_mode = 'I' + elif npimg.dtype == np.float32: + expected_mode = 'F' + if mode is not None and mode != expected_mode: + raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}" + .format(mode, np.dtype, expected_mode)) + mode = expected_mode + + elif npimg.shape[2] == 2: + permitted_2_channel_modes = ['LA'] + if mode is not None and mode not in permitted_2_channel_modes: + raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes)) + + if mode is None and npimg.dtype == np.uint8: + mode = 'LA' + + elif npimg.shape[2] == 4: + permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX'] + if mode is not None and mode not in permitted_4_channel_modes: + raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes)) + + if mode is None and npimg.dtype == np.uint8: + mode = 'RGBA' + else: + permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV'] + if mode is not None and mode not in permitted_3_channel_modes: + raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes)) + if mode is None and npimg.dtype == np.uint8: + mode = 'RGB' + + if mode is None: + raise TypeError('Input type {} is not supported'.format(npimg.dtype)) + + return Image.fromarray(npimg, mode=mode)
+ + +
[docs]def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor: + """Normalize a tensor image with mean and standard deviation. + + .. note:: + This transform acts out of place by default, i.e., it does not mutates the input tensor. + + See :class:`~torchvision.transforms.Normalize` for more details. + + Args: + tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W) to be normalized. + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation inplace. + + Returns: + Tensor: Normalized Tensor image. + """ + if not isinstance(tensor, torch.Tensor): + raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor))) + + if tensor.ndim < 3: + raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = ' + '{}.'.format(tensor.size())) + + if not inplace: + tensor = tensor.clone() + + dtype = tensor.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device) + std = torch.as_tensor(std, dtype=dtype, device=tensor.device) + if (std == 0).any(): + raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype)) + if mean.ndim == 1: + mean = mean.view(-1, 1, 1) + if std.ndim == 1: + std = std.view(-1, 1, 1) + tensor.sub_(mean).div_(std) + return tensor
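A sketch of normalize using the commonly cited ImageNet statistics; the random input image is purely illustrative.

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 224, 224)  # float image in [0, 1]
out = F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(out.mean(dim=(1, 2)))    # per-channel means shift toward zero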
+ + +
[docs]def resize(img: Tensor, size: List[int], interpolation: int = Image.BILINEAR) -> Tensor: + r"""Resize the input image to the given size. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + img (PIL Image or Tensor): Image to be resized. + size (sequence or int): Desired output size. If size is a sequence like + (h, w), the output size will be matched to this. If size is an int, + the smaller edge of the image will be matched to this number maintaining + the aspect ratio. i.e, if height > width, then image will be rescaled to + :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`. + In torchscript mode size as single int is not supported, use a tuple or + list of length 1: ``[size, ]``. + interpolation (int, optional): Desired interpolation enum defined by `filters`_. + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. + + Returns: + PIL Image or Tensor: Resized image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.resize(img, size=size, interpolation=interpolation) + + return F_t.resize(img, size=size, interpolation=interpolation)
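A sketch showing the two size conventions of resize: a single int matches the smaller edge for a PIL image, while torchscript-friendly tensor code passes an explicit [h, w] list (shapes are illustrative).

from PIL import Image
import torch
import torchvision.transforms.functional as F

pil_img = Image.new("RGB", (640, 480))
small = F.resize(pil_img, 256)         # smaller edge -> 256, aspect ratio kept
print(small.size)                      # (341, 256)

img_t = torch.rand(3, 480, 640)
small_t = F.resize(img_t, [256, 256])  # explicit (h, w) for tensor input
print(small_t.shape)                   # torch.Size([3, 256, 256])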
+ + +def scale(*args, **kwargs): + warnings.warn("The use of the transforms.Scale transform is deprecated, " + + "please use transforms.Resize instead.") + return resize(*args, **kwargs) + + +
[docs]def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor: + r"""Pad the given image on all sides with the given "pad" value. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + img (PIL Image or Tensor): Image to be padded. + padding (int or tuple or list): Padding on each border. If a single int is provided this + is used to pad all borders. If tuple of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a tuple of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + In torchscript mode padding as single int is not supported, use a tuple or + list of length 1: ``[padding, ]``. + fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. Only int value is supported for Tensors. + padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. + Mode symmetric is not yet supported for Tensor inputs. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value on the edge of the image + + - reflect: pads with reflection of image (without repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image (repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + + Returns: + PIL Image or Tensor: Padded image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) + + return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
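A sketch of pad on a tensor image, adding one pixel of constant fill on every side; the values are illustrative.

import torch
import torchvision.transforms.functional as F

img = torch.arange(4, dtype=torch.float32).reshape(1, 2, 2)
padded = F.pad(img, [1, 1, 1, 1], fill=0, padding_mode="constant")
print(padded.shape)  # torch.Size([1, 4, 4])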
+ + +
[docs]def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + """Crop the given image at specified location and output size. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. + width (int): Width of the crop box. + + Returns: + PIL Image or Tensor: Cropped image. + """ + + if not isinstance(img, torch.Tensor): + return F_pil.crop(img, top, left, height, width) + + return F_t.crop(img, top, left, height, width)
+ + +
[docs]def center_crop(img: Tensor, output_size: List[int]) -> Tensor: + """Crops the given image at the center. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + img (PIL Image or Tensor): Image to be cropped. + output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int + it is used for both directions. + + Returns: + PIL Image or Tensor: Cropped image. + """ + if isinstance(output_size, numbers.Number): + output_size = (int(output_size), int(output_size)) + elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: + output_size = (output_size[0], output_size[0]) + + image_width, image_height = _get_image_size(img) + crop_height, crop_width = output_size + + # crop_top = int(round((image_height - crop_height) / 2.)) + # Result can be different between python func and scripted func + # Temporary workaround: + crop_top = int((image_height - crop_height + 1) * 0.5) + # crop_left = int(round((image_width - crop_width) / 2.)) + # Result can be different between python func and scripted func + # Temporary workaround: + crop_left = int((image_width - crop_width + 1) * 0.5) + return crop(img, crop_top, crop_left, crop_height, crop_width)
+ + +
[docs]def resized_crop( + img: Tensor, top: int, left: int, height: int, width: int, size: List[int], interpolation: int = Image.BILINEAR +) -> Tensor: + """Crop the given image and resize it to desired size. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Notably used in :class:`~torchvision.transforms.RandomResizedCrop`. + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. + width (int): Width of the crop box. + size (sequence or int): Desired output size. Same semantics as ``resize``. + interpolation (int, optional): Desired interpolation enum defined by `filters`_. + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. + Returns: + PIL Image or Tensor: Cropped image. + """ + img = crop(img, top, left, height, width) + img = resize(img, size, interpolation) + return img
+ + +
[docs]def hflip(img: Tensor) -> Tensor: + """Horizontally flip the given PIL Image or Tensor. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of trailing + dimensions. + + Returns: + PIL Image or Tensor: Horizontally flipped image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.hflip(img) + + return F_t.hflip(img)
+ + +def _get_perspective_coeffs( + startpoints: List[List[int]], endpoints: List[List[int]] +) -> List[float]: + """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. + + In Perspective Transform each pixel (x, y) in the original image gets transformed as, + (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) + + Args: + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + + Returns: + octuple (a, b, c, d, e, f, g, h) for transforming each pixel. + """ + a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float) + + for i, (p1, p2) in enumerate(zip(endpoints, startpoints)): + a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) + a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) + + b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8) + res = torch.lstsq(b_matrix, a_matrix)[0] + + output: List[float] = res.squeeze(1).tolist() + return output + + +
[docs]def perspective( + img: Tensor, + startpoints: List[List[int]], + endpoints: List[List[int]], + interpolation: int = 2, + fill: Optional[int] = None +) -> Tensor: + """Perform perspective transform of the given image. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): Image to be transformed. + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + interpolation (int): Interpolation type. If input is Tensor, only ``PIL.Image.NEAREST`` and + ``PIL.Image.BILINEAR`` are supported. Default, ``PIL.Image.BILINEAR`` for PIL images and Tensors. + fill (n-tuple or int or float): Pixel fill value for area outside the rotated + image. If int or float, the value is used for all bands respectively. + This option is only available for ``pillow>=5.0.0``. This option is not supported for Tensor + input. Fill value for the area outside the transform in the output image is always 0. + + Returns: + PIL Image or Tensor: transformed Image. + """ + + coeffs = _get_perspective_coeffs(startpoints, endpoints) + + if not isinstance(img, torch.Tensor): + return F_pil.perspective(img, coeffs, interpolation=interpolation, fill=fill) + + return F_t.perspective(img, coeffs, interpolation=interpolation, fill=fill)
+ + +
[docs]def vflip(img: Tensor) -> Tensor: + """Vertically flip the given PIL Image or torch Tensor. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of trailing + dimensions. + + Returns: + PIL Image: Vertically flipped image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.vflip(img) + + return F_t.vflip(img)
+ + +
[docs]def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + """Crop the given image into four corners and the central crop. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + + Returns: + tuple: tuple (tl, tr, bl, br, center) + Corresponding top left, top right, bottom left, bottom right and center crop. + """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + image_width, image_height = _get_image_size(img) + crop_height, crop_width = size + if crop_width > image_width or crop_height > image_height: + msg = "Requested crop size {} is bigger than input size {}" + raise ValueError(msg.format(size, (image_height, image_width))) + + tl = crop(img, 0, 0, crop_height, crop_width) + tr = crop(img, 0, image_width - crop_width, crop_height, crop_width) + bl = crop(img, image_height - crop_height, 0, crop_height, crop_width) + br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width) + + center = center_crop(img, [crop_height, crop_width]) + + return tl, tr, bl, br, center
+ + +
[docs]def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]: + """Generate ten cropped images from the given image. + Crop the given image into four corners and the central crop plus the + flipped version of these (horizontal flipping is used by default). + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool): Use vertical flipping instead of horizontal + + Returns: + tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) + Corresponding top left, top right, bottom left, bottom right and + center crop and same for the flipped image. + """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + first_five = five_crop(img, size) + + if vertical_flip: + img = vflip(img) + else: + img = hflip(img) + + second_five = five_crop(img, size) + return first_five + second_five
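A sketch of five_crop and ten_crop on a PIL image; the blank image is only a stand-in.

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new("RGB", (128, 128))
tl, tr, bl, br, center = F.five_crop(img, 64)
print(center.size)           # (64, 64)

crops = F.ten_crop(img, 64)  # the five crops plus their horizontally flipped versions
print(len(crops))            # 10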
+ + +
[docs]def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + """Adjust brightness of an Image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + brightness_factor (float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL Image or Tensor: Brightness adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_brightness(img, brightness_factor) + + return F_t.adjust_brightness(img, brightness_factor)
+ + +
[docs]def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + """Adjust contrast of an Image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL Image or Tensor: Contrast adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_contrast(img, contrast_factor) + + return F_t.adjust_contrast(img, contrast_factor)
+ + +
[docs]def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + """Adjust color saturation of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL Image or Tensor: Saturation adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_saturation(img, saturation_factor) + + return F_t.adjust_saturation(img, saturation_factor)
+ + +
[docs]def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + """Adjust hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + See `Hue`_ for more details. + + .. _Hue: https://en.wikipedia.org/wiki/Hue + + Args: + img (PIL Image or Tensor): Image to be adjusted. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL Image or Tensor: Hue adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_hue(img, hue_factor) + + return F_t.adjust_hue(img, hue_factor)
+ + +
[docs]def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + r"""Perform gamma correction on an image. + + Also known as Power Law Transform. Intensities in RGB mode are adjusted + based on the following equation: + + .. math:: + I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma} + + See `Gamma Correction`_ for more details. + + .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction + + Args: + img (PIL Image or Tensor): PIL Image to be adjusted. + gamma (float): Non negative real number, same as :math:`\gamma` in the equation. + gamma larger than 1 make the shadows darker, + while gamma smaller than 1 make dark regions lighter. + gain (float): The constant multiplier. + Returns: + PIL Image or Tensor: Gamma correction adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_gamma(img, gamma, gain) + + return F_t.adjust_gamma(img, gamma, gain)
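A sketch chaining the color adjustment functions above on a float tensor image; the adjustment factors are arbitrary examples.

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 64, 64)          # RGB float image in [0, 1]
img = F.adjust_brightness(img, 1.2)  # 20% brighter
img = F.adjust_contrast(img, 0.8)    # slightly lower contrast
img = F.adjust_saturation(img, 1.5)  # more saturated
img = F.adjust_hue(img, 0.05)        # hue_factor must lie in [-0.5, 0.5]
img = F.adjust_gamma(img, 0.9)       # gamma < 1 lightens dark regions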
+ + +def _get_inverse_affine_matrix( + center: List[float], angle: float, translate: List[float], scale: float, shear: List[float] +) -> List[float]: + # Helper method to compute inverse matrix for affine transformation + + # As it is explained in PIL.Image.rotate + # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1 + # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1] + # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1] + # RSS is rotation with scale and shear matrix + # RSS(a, s, (sx, sy)) = + # = R(a) * S(s) * SHy(sy) * SHx(sx) + # = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(x)/cos(y) - sin(a)), 0 ] + # [ s*sin(a + sy)/cos(sy), s*(-sin(a - sy)*tan(x)/cos(y) + cos(a)), 0 ] + # [ 0 , 0 , 1 ] + # + # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears: + # SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0] + # [0, 1 ] [-tan(s), 1] + # + # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy + + return matrix + + +
[docs]def rotate( + img: Tensor, angle: float, resample: int = 0, expand: bool = False, + center: Optional[List[int]] = None, fill: Optional[int] = None +) -> Tensor: + """Rotate the image by angle. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to be rotated. + angle (float or int): rotation angle value in degrees, counter-clockwise. + resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional): + An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``. + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (list or tuple, optional): Optional center of rotation. Origin is the upper left corner. + Default is the center of the image. + fill (n-tuple or int or float): Pixel fill value for area outside the rotated + image. If int or float, the value is used for all bands respectively. + Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``. + This option is not supported for Tensor input. Fill value for the area outside the transform in the output + image is always 0. + + Returns: + PIL Image or Tensor: Rotated image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if center is not None and not isinstance(center, (list, tuple)): + raise TypeError("Argument center should be a sequence") + + if not isinstance(img, torch.Tensor): + return F_pil.rotate(img, angle=angle, resample=resample, expand=expand, center=center, fill=fill) + + center_f = [0.0, 0.0] + if center is not None: + img_size = _get_image_size(img) + # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. + center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)] + + # due to current incoherence of rotation angle direction between affine and rotate implementations + # we need to set -angle. + matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0]) + return F_t.rotate(img, matrix=matrix, resample=resample, expand=expand, fill=fill)
+ + +
[docs]def affine( + img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float], + resample: int = 0, fillcolor: Optional[int] = None +) -> Tensor: + """Apply affine transformation on the image keeping image center invariant. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to transform. + angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction. + translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation) + scale (float): overall scale + shear (float or tuple or list): shear angle value in degrees between -180 to 180, clockwise direction. + If a tuple of list is specified, the first value corresponds to a shear parallel to the x axis, while + the second value corresponds to a shear parallel to the y axis. + resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional): + An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image is PIL Image and has mode "1" or "P", it is set to ``PIL.Image.NEAREST``. + If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported. + fillcolor (int): Optional fill color for the area outside the transform in the output image (Pillow>=5.0.0). + This option is not supported for Tensor input. Fill value for the area outside the transform in the output + image is always 0. + + Returns: + PIL Image or Tensor: Transformed image. + """ + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if not isinstance(translate, (list, tuple)): + raise TypeError("Argument translate should be a sequence") + + if len(translate) != 2: + raise ValueError("Argument translate should be a sequence of length 2") + + if scale <= 0.0: + raise ValueError("Argument scale should be positive") + + if not isinstance(shear, (numbers.Number, (list, tuple))): + raise TypeError("Shear should be either a single value or a sequence of two values") + + if isinstance(angle, int): + angle = float(angle) + + if isinstance(translate, tuple): + translate = list(translate) + + if isinstance(shear, numbers.Number): + shear = [shear, 0.0] + + if isinstance(shear, tuple): + shear = list(shear) + + if len(shear) == 1: + shear = [shear[0], shear[0]] + + if len(shear) != 2: + raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear)) + + img_size = _get_image_size(img) + if not isinstance(img, torch.Tensor): + # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5) + # it is visually better to estimate the center without 0.5 offset + # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine + center = [img_size[0] * 0.5, img_size[1] * 0.5] + matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + + return F_pil.affine(img, matrix=matrix, resample=resample, fillcolor=fillcolor) + + translate_f = [1.0 * t for t in translate] + matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear) + return F_t.affine(img, matrix=matrix, resample=resample, fillcolor=fillcolor)
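A sketch of rotate and affine on a tensor image; the angle, translation, and scale values are arbitrary and illustrative.

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 100, 100)

rotated = F.rotate(img, angle=30.0)  # counter-clockwise, output keeps the input size
print(rotated.shape)                 # torch.Size([3, 100, 100])

warped = F.affine(img, angle=15.0, translate=[10, 5], scale=1.2, shear=[0.0])
print(warped.shape)                  # torch.Size([3, 100, 100])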
+ + +
[docs]@torch.jit.unused +def to_grayscale(img, num_output_channels=1): + """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image. + + Args: + img (PIL Image): PIL Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1. + + Returns: + PIL Image: Grayscale version of the image. + if num_output_channels = 1 : returned image is single channel + + if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if isinstance(img, Image.Image): + return F_pil.to_grayscale(img, num_output_channels) + + raise TypeError("Input should be PIL Image")
+ + +
[docs]def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + """Convert RGB image to grayscale version of image. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Note: + Please, note that this method supports only RGB images as input. For inputs in other color spaces, + please, consider using meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image. + + Args: + img (PIL Image or Tensor): RGB Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1. + + Returns: + PIL Image or Tensor: Grayscale version of the image. + if num_output_channels = 1 : returned image is single channel + + if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if not isinstance(img, torch.Tensor): + return F_pil.to_grayscale(img, num_output_channels) + + return F_t.rgb_to_grayscale(img, num_output_channels)
+ + +
[docs]def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor: + """ Erase the input Tensor Image with given value. + + Args: + img (Tensor Image): Tensor image of size (C, H, W) to be erased + i (int): i in (i,j) i.e coordinates of the upper left corner. + j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the erased region. + w (int): Width of the erased region. + v: Erasing value. + inplace(bool, optional): For in-place operations. By default is set False. + + Returns: + Tensor Image: Erased image. + """ + if not isinstance(img, torch.Tensor): + raise TypeError('img should be Tensor Image. Got {}'.format(type(img))) + + if not inplace: + img = img.clone() + + img[..., i:i + h, j:j + w] = v + return img
+ + +
[docs]def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor: + """Performs Gaussian blurring on the img by given kernel. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + img (PIL Image or Tensor): Image to be blurred + kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers + like ``(kx, ky)`` or a single integer for square kernels. + In torchscript mode kernel_size as single int is not supported, use a tuple or + list of length 1: ``[ksize, ]``. + sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a + sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the + same sigma in both X/Y directions. If None, then it is computed using + ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``. + Default, None. In torchscript mode sigma as single float is + not supported, use a tuple or list of length 1: ``[sigma, ]``. + + Returns: + PIL Image or Tensor: Gaussian Blurred version of the image. + """ + if not isinstance(kernel_size, (int, list, tuple)): + raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size))) + if isinstance(kernel_size, int): + kernel_size = [kernel_size, kernel_size] + if len(kernel_size) != 2: + raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size))) + for ksize in kernel_size: + if ksize % 2 == 0 or ksize < 0: + raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size)) + + if sigma is None: + sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] + + if sigma is not None and not isinstance(sigma, (int, float, list, tuple)): + raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma))) + if isinstance(sigma, (int, float)): + sigma = [float(sigma), float(sigma)] + if isinstance(sigma, (list, tuple)) and len(sigma) == 1: + sigma = [sigma[0], sigma[0]] + if len(sigma) != 2: + raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma))) + for s in sigma: + if s <= 0.: + raise ValueError('sigma should have positive values. Got {}'.format(sigma)) + + t_img = img + if not isinstance(img, torch.Tensor): + if not F_pil._is_pil_image(img): + raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img))) + + t_img = to_tensor(img) + + output = F_t.gaussian_blur(t_img, kernel_size, sigma) + + if not isinstance(img, torch.Tensor): + output = to_pil_image(output) + return output
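A sketch of gaussian_blur on a tensor image; the kernel size and sigma are arbitrary (kernel sizes must be odd and positive).

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 64, 64)
blurred = F.gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])
print(blurred.shape)  # torch.Size([3, 64, 64])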
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_modules/torchvision/transforms/transforms.html b/docs/1.7.0/torchvision/_modules/torchvision/transforms/transforms.html new file mode 100644 index 000000000000..30c8cda090e7 --- /dev/null +++ b/docs/1.7.0/torchvision/_modules/torchvision/transforms/transforms.html @@ -0,0 +1,2160 @@ + + + + + + + + + + + + torchvision.transforms.transforms — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchvision.transforms.transforms

+import math
+import numbers
+import random
+import warnings
+from collections.abc import Sequence
+from typing import Tuple, List, Optional
+
+import torch
+from PIL import Image
+from torch import Tensor
+
+try:
+    import accimage
+except ImportError:
+    accimage = None
+
+from . import functional as F
+
+__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
+           "CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
+           "RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
+           "LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
+           "RandomPerspective", "RandomErasing", "GaussianBlur"]
+
+_pil_interpolation_to_str = {
+    Image.NEAREST: 'PIL.Image.NEAREST',
+    Image.BILINEAR: 'PIL.Image.BILINEAR',
+    Image.BICUBIC: 'PIL.Image.BICUBIC',
+    Image.LANCZOS: 'PIL.Image.LANCZOS',
+    Image.HAMMING: 'PIL.Image.HAMMING',
+    Image.BOX: 'PIL.Image.BOX',
+}
+
+
+
[docs]class Compose: + """Composes several transforms together. This transform does not support torchscript. + Please, see the note below. + + Args: + transforms (list of ``Transform`` objects): list of transforms to compose. + + Example: + >>> transforms.Compose([ + >>> transforms.CenterCrop(10), + >>> transforms.ToTensor(), + >>> ]) + + .. note:: + In order to script the transformations, please use ``torch.nn.Sequential`` as below. + + >>> transforms = torch.nn.Sequential( + >>> transforms.CenterCrop(10), + >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + >>> ) + >>> scripted_transforms = torch.jit.script(transforms) + + Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require + `lambda` functions or ``PIL.Image``. + + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, img): + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string
+ + +
[docs]class ToTensor: + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript. + + Converts a PIL Image or numpy.ndarray (H x W x C) in the range + [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] + if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) + or if the numpy.ndarray has dtype = np.uint8 + + In the other cases, tensors are returned without scaling. + + .. note:: + Because the input image is scaled to [0.0, 1.0], this transformation should not be used when + transforming target image masks. See the `references`_ for implementing the transforms for image masks. + + .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation + """ + + def __call__(self, pic): + """ + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + return F.to_tensor(pic) + + def __repr__(self): + return self.__class__.__name__ + '()'
+ + +class PILToTensor: + """Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript. + + Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W). + """ + + def __call__(self, pic): + """ + Args: + pic (PIL Image): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + return F.pil_to_tensor(pic) + + def __repr__(self): + return self.__class__.__name__ + '()' + + +
[docs]class ConvertImageDtype(torch.nn.Module): + """Convert a tensor image to the given ``dtype`` and scale the values accordingly + + Args: + dtype (torch.dtype): Desired data type of the output + + .. note:: + + When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly. + If converted back and forth, this mismatch has no effect. + + Raises: + RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as + well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to + overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range + of the integer ``dtype``. + """ + + def __init__(self, dtype: torch.dtype) -> None: + super().__init__() + self.dtype = dtype + + def forward(self, image: torch.Tensor) -> torch.Tensor: + return F.convert_image_dtype(image, self.dtype)
+ + +
[docs]class ToPILImage: + """Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript. + + Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape + H x W x C to a PIL Image while preserving the value range. + + Args: + mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + If ``mode`` is ``None`` (default) there are some assumptions made about the input data: + - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``. + - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``. + - If the input has 2 channels, the ``mode`` is assumed to be ``LA``. + - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``, + ``short``). + + .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + """ + def __init__(self, mode=None): + self.mode = mode + + def __call__(self, pic): + """ + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. + + Returns: + PIL Image: Image converted to PIL Image. + + """ + return F.to_pil_image(pic, self.mode) + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + if self.mode is not None: + format_string += 'mode={0}'.format(self.mode) + format_string += ')' + return format_string
+ + +
[docs]class Normalize(torch.nn.Module): + """Normalize a tensor image with mean and standard deviation. + Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` + channels, this transform will normalize each channel of the input + ``torch.*Tensor`` i.e., + ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` + + .. note:: + This transform acts out of place, i.e., it does not mutate the input tensor. + + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation in-place. + + """ + + def __init__(self, mean, std, inplace=False): + super().__init__() + self.mean = mean + self.std = std + self.inplace = inplace + +
[docs] def forward(self, tensor: Tensor) -> Tensor: + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be normalized. + + Returns: + Tensor: Normalized Tensor image. + """ + return F.normalize(tensor, self.mean, self.std, self.inplace)
+ + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+ + +
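A sketch of a typical preprocessing pipeline built from the transform classes on this page; the ImageNet mean/std values and the blank input image are illustrative.

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

img = Image.new("RGB", (640, 480))
batch = preprocess(img).unsqueeze(0)  # shape: [1, 3, 224, 224]
print(batch.shape)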
[docs]class Resize(torch.nn.Module): + """Resize the input image to the given size. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + size (sequence or int): Desired output size. If size is a sequence like + (h, w), output size will be matched to this. If size is an int, + smaller edge of the image will be matched to this number. + i.e, if height > width, then image will be rescaled to + (size * height / width, size). + In torchscript mode padding as single int is not supported, use a tuple or + list of length 1: ``[size, ]``. + interpolation (int, optional): Desired interpolation enum defined by `filters`_. + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + super().__init__() + self.size = _setup_size(size, error_msg="If size is a sequence, it should have 2 values") + self.interpolation = interpolation + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be scaled. + + Returns: + PIL Image or Tensor: Rescaled image. + """ + return F.resize(img, self.size, self.interpolation)
+ + def __repr__(self): + interpolate_str = _pil_interpolation_to_str[self.interpolation] + return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
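A quick sketch of the two ``Resize`` size conventions on a dummy tensor (shapes and sizes are illustrative assumptions):

.. code:: python

    import torch
    from torchvision import transforms

    img = torch.rand(3, 300, 400)              # dummy tensor image (C, H, W)

    resize = transforms.Resize(256)            # int: match the smaller edge to 256
    print(resize(img).shape)                   # torch.Size([3, 256, 341])

    resize_hw = transforms.Resize((224, 224))  # sequence: exact (h, w) output
    print(resize_hw(img).shape)                # torch.Size([3, 224, 224])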
+ + +
[docs]class Scale(Resize): + """ + Note: This transform is deprecated in favor of Resize. + """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.Scale transform is deprecated, " + + "please use transforms.Resize instead.") + super(Scale, self).__init__(*args, **kwargs)
+ + +
[docs]class CenterCrop(torch.nn.Module): + """Crops the given image at the center. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + """ + + def __init__(self, size): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + PIL Image or Tensor: Cropped image. + """ + return F.center_crop(img, self.size)
+ + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size)
+ + +
[docs]class Pad(torch.nn.Module): + """Pad the given image on all sides with the given "pad" value. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + padding (int or tuple or list): Padding on each border. If a single int is provided this + is used to pad all borders. If tuple of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a tuple of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + In torchscript mode padding as single int is not supported, use a tuple or + list of length 1: ``[padding, ]``. + fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. Mode symmetric is not yet supported for Tensor inputs. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image + + - reflect: pads with reflection of image without repeating the last value on the edge + + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge + + For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + """ + + def __init__(self, padding, fill=0, padding_mode="constant"): + super().__init__() + if not isinstance(padding, (numbers.Number, tuple, list)): + raise TypeError("Got inappropriate padding arg") + + if not isinstance(fill, (numbers.Number, str, tuple)): + raise TypeError("Got inappropriate fill arg") + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]: + raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding))) + + self.padding = padding + self.fill = fill + self.padding_mode = padding_mode + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be padded. + + Returns: + PIL Image or Tensor: Padded image. + """ + return F.pad(img, self.padding, self.fill, self.padding_mode)
+ + def __repr__(self): + return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\ + format(self.padding, self.fill, self.padding_mode)
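A hedged sketch of ``Pad`` with an int and with a length-2 tuple; the padding amounts and the ``reflect`` mode are illustrative assumptions, not recommended settings:

.. code:: python

    import torch
    from torchvision import transforms

    img = torch.rand(3, 32, 32)                # dummy tensor image

    pad_all = transforms.Pad(4)                # 4 pixels on every border, constant fill 0
    print(pad_all(img).shape)                  # torch.Size([3, 40, 40])

    # 2 pixels left/right, 6 pixels top/bottom, mirrored from the image content
    pad_lr_tb = transforms.Pad((2, 6), padding_mode="reflect")
    print(pad_lr_tb(img).shape)                # torch.Size([3, 44, 36])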
+ + +
[docs]class Lambda: + """Apply a user-defined lambda as a transform. This transform does not support torchscript. + + Args: + lambd (function): Lambda/function to be used for transform. + """ + + def __init__(self, lambd): + if not callable(lambd): + raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__))) + self.lambd = lambd + + def __call__(self, img): + return self.lambd(img) + + def __repr__(self): + return self.__class__.__name__ + '()'
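A minimal ``Lambda`` sketch; the clamping step is a hypothetical custom operation chosen only for illustration:

.. code:: python

    import torch
    from torchvision import transforms

    # Wrap an arbitrary callable as a transform
    clamp = transforms.Lambda(lambda t: t.clamp(0.0, 0.5))

    img = torch.rand(3, 8, 8)                  # dummy tensor image
    print(clamp(img).max() <= 0.5)             # tensor(True)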
+ + +class RandomTransforms: + """Base class for a list of transformations with randomness + + Args: + transforms (list or tuple): list of transformations + """ + + def __init__(self, transforms): + if not isinstance(transforms, Sequence): + raise TypeError("Argument transforms should be a sequence") + self.transforms = transforms + + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + + +
[docs]class RandomApply(torch.nn.Module): + """Apply randomly a list of transformations with a given probability. + + .. note:: + In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of + transforms as shown below: + + >>> transforms = transforms.RandomApply(torch.nn.ModuleList([ + >>> transforms.ColorJitter(), + >>> ]), p=0.3) + >>> scripted_transforms = torch.jit.script(transforms) + + Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require + `lambda` functions or ``PIL.Image``. + + Args: + transforms (list or tuple or torch.nn.Module): list of transformations + p (float): probability + """ + + def __init__(self, transforms, p=0.5): + super().__init__() + self.transforms = transforms + self.p = p + + def forward(self, img): + if self.p < torch.rand(1): + return img + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += '\n p={}'.format(self.p) + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string
+ + +
[docs]class RandomOrder(RandomTransforms): + """Apply a list of transformations in a random order. This transform does not support torchscript. + """ + def __call__(self, img): + order = list(range(len(self.transforms))) + random.shuffle(order) + for i in order: + img = self.transforms[i](img) + return img
+ + +
[docs]class RandomChoice(RandomTransforms): + """Apply single transformation randomly picked from a list. This transform does not support torchscript. + """ + def __call__(self, img): + t = random.choice(self.transforms) + return t(img)
+ + +
[docs]class RandomCrop(torch.nn.Module): + """Crop the given image at a random location. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + padding (int or sequence, optional): Optional padding on each border + of the image. Default is None. If a single int is provided this + is used to pad all borders. If tuple of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a tuple of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + In torchscript mode padding as single int is not supported, use a tuple or + list of length 1: ``[padding, ]``. + pad_if_needed (boolean): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. + Mode symmetric is not yet supported for Tensor inputs. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value on the edge of the image + + - reflect: pads with reflection of image (without repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image (repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + + """ + +
[docs] @staticmethod + def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random crop. + + Args: + img (PIL Image or Tensor): Image to be cropped. + output_size (tuple): Expected output size of the crop. + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. + """ + w, h = F._get_image_size(img) + th, tw = output_size + if w == tw and h == th: + return 0, 0, h, w + + i = torch.randint(0, h - th + 1, size=(1, )).item() + j = torch.randint(0, w - tw + 1, size=(1, )).item() + return i, j, th, tw
+ + def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): + super().__init__() + + self.size = tuple(_setup_size( + size, error_msg="Please provide only two dimensions (h, w) for size." + )) + + self.padding = padding + self.pad_if_needed = pad_if_needed + self.fill = fill + self.padding_mode = padding_mode + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + PIL Image or Tensor: Cropped image. + """ + if self.padding is not None: + img = F.pad(img, self.padding, self.fill, self.padding_mode) + + width, height = F._get_image_size(img) + # pad the width if needed + if self.pad_if_needed and width < self.size[1]: + padding = [self.size[1] - width, 0] + img = F.pad(img, padding, self.fill, self.padding_mode) + # pad the height if needed + if self.pad_if_needed and height < self.size[0]: + padding = [0, self.size[0] - height] + img = F.pad(img, padding, self.fill, self.padding_mode) + + i, j, h, w = self.get_params(img, self.size) + + return F.crop(img, i, j, h, w)
+ + def __repr__(self): + return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)
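A usage sketch of ``RandomCrop`` in the common pad-then-crop augmentation pattern; the 32x32 size and padding of 4 are illustrative assumptions:

.. code:: python

    import torch
    from torchvision import transforms

    # Pad by 4 on every border, then crop back to 32x32 at a random offset
    crop = transforms.RandomCrop(32, padding=4)

    img = torch.rand(3, 32, 32)                # dummy tensor image
    print(crop(img).shape)                     # torch.Size([3, 32, 32])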
+ + +
[docs]class RandomHorizontalFlip(torch.nn.Module): + """Horizontally flip the given image randomly with a given probability. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be flipped. + + Returns: + PIL Image or Tensor: Randomly flipped image. + """ + if torch.rand(1) < self.p: + return F.hflip(img) + return img
+ + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
+ + +
[docs]class RandomVerticalFlip(torch.nn.Module): + """Vertically flip the given image randomly with a given probability. + The image can be a PIL Image or a torch Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be flipped. + + Returns: + PIL Image or Tensor: Randomly flipped image. + """ + if torch.rand(1) < self.p: + return F.vflip(img) + return img
+ + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
+ + +
[docs]class RandomPerspective(torch.nn.Module): + """Performs a random perspective transformation of the given image with a given probability. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1. + Default is 0.5. + p (float): probability of the image being transformed. Default is 0.5. + interpolation (int): Interpolation type. If input is Tensor, only ``PIL.Image.NEAREST`` and + ``PIL.Image.BILINEAR`` are supported. Default, ``PIL.Image.BILINEAR`` for PIL images and Tensors. + fill (n-tuple or int or float): Pixel fill value for area outside the rotated + image. If int or float, the value is used for all bands respectively. Default is 0. + This option is only available for ``pillow>=5.0.0``. This option is not supported for Tensor + input. Fill value for the area outside the transform in the output image is always 0. + + """ + + def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BILINEAR, fill=0): + super().__init__() + self.p = p + self.interpolation = interpolation + self.distortion_scale = distortion_scale + self.fill = fill + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be Perspectively transformed. + + Returns: + PIL Image or Tensor: Randomly transformed image. + """ + if torch.rand(1) < self.p: + width, height = F._get_image_size(img) + startpoints, endpoints = self.get_params(width, height, self.distortion_scale) + return F.perspective(img, startpoints, endpoints, self.interpolation, self.fill) + return img
+ +
[docs] @staticmethod + def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]: + """Get parameters for ``perspective`` for a random perspective transform. + + Args: + width (int): width of the image. + height (int): height of the image. + distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1. + + Returns: + List containing [top-left, top-right, bottom-right, bottom-left] of the original image, + List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image. + """ + half_height = height // 2 + half_width = width // 2 + topleft = [ + int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()), + int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item()) + ] + topright = [ + int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()), + int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item()) + ] + botright = [ + int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()), + int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item()) + ] + botleft = [ + int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()), + int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item()) + ] + startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]] + endpoints = [topleft, topright, botright, botleft] + return startpoints, endpoints
+ + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
+ + +
[docs]class RandomResizedCrop(torch.nn.Module): + """Crop the given image to random size and aspect ratio. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size (int or sequence): expected output size of each edge. If size is an + int instead of sequence like (h, w), a square output size ``(size, size)`` is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + scale (tuple of float): range of size of the origin size cropped + ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped. + interpolation (int): Desired interpolation enum defined by `filters`_. + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + if not isinstance(scale, Sequence): + raise TypeError("Scale should be a sequence") + if not isinstance(ratio, Sequence): + raise TypeError("Ratio should be a sequence") + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("Scale and ratio should be of kind (min, max)") + + self.interpolation = interpolation + self.scale = scale + self.ratio = ratio + +
[docs] @staticmethod + def get_params( + img: Tensor, scale: List[float], ratio: List[float] + ) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image or Tensor): Input image. + scale (list): range of scale of the origin size cropped + ratio (list): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + width, height = F._get_image_size(img) + area = height * width + + for _ in range(10): + target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + log_ratio = torch.log(torch.tensor(ratio)) + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < w <= width and 0 < h <= height: + i = torch.randint(0, height - h + 1, size=(1,)).item() + j = torch.randint(0, width - w + 1, size=(1,)).item() + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w
+ +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped and resized. + + Returns: + PIL Image or Tensor: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
+ + def __repr__(self): + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string
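A short ``RandomResizedCrop`` sketch; the output size and input shape are dummy assumptions, and the scale/ratio arguments simply repeat the class defaults for illustration:

.. code:: python

    import torch
    from torchvision import transforms

    rrc = transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))

    img = torch.rand(3, 500, 375)              # dummy tensor image
    print(rrc(img).shape)                      # torch.Size([3, 224, 224]) regardless of the sampled crop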
+ + +
[docs]class RandomSizedCrop(RandomResizedCrop): + """ + Note: This transform is deprecated in favor of RandomResizedCrop. + """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " + + "please use transforms.RandomResizedCrop instead.") + super(RandomSizedCrop, self).__init__(*args, **kwargs)
+ + +
[docs]class FiveCrop(torch.nn.Module): + """Crop the given image into four corners and the central crop. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an ``int`` + instead of sequence like (h, w), a square crop of size (size, size) is made. + If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + + Example: + >>> transform = Compose([ + >>> FiveCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + tuple of 5 images. Image can be PIL Image or Tensor + """ + return F.five_crop(img, self.size)
+ + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size)
+ + +
[docs]class TenCrop(torch.nn.Module): + """Crop the given image into four corners and the central crop plus the flipped version of + these (horizontal flipping is used by default). + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool): Use vertical flipping instead of horizontal + + Example: + >>> transform = Compose([ + >>> TenCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size, vertical_flip=False): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + self.vertical_flip = vertical_flip + +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + tuple of 10 images. Image can be PIL Image or Tensor + """ + return F.ten_crop(img, self.size, self.vertical_flip)
+ + def __repr__(self): + return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
+ + +
[docs]class LinearTransformation(torch.nn.Module): + """Transform a tensor image with a square transformation matrix and a mean_vector computed + offline. + Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and + subtract mean_vector from it which is then followed by computing the dot + product with the transformation matrix and then reshaping the tensor to its + original shape. + + Applications: + whitening transformation: Suppose X is a column vector zero-centered data. + Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X), + perform SVD on this matrix and pass it as transformation_matrix. + + Args: + transformation_matrix (Tensor): tensor [D x D], D = C x H x W + mean_vector (Tensor): tensor [D], D = C x H x W + """ + + def __init__(self, transformation_matrix, mean_vector): + super().__init__() + if transformation_matrix.size(0) != transformation_matrix.size(1): + raise ValueError("transformation_matrix should be square. Got " + + "[{} x {}] rectangular matrix.".format(*transformation_matrix.size())) + + if mean_vector.size(0) != transformation_matrix.size(0): + raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) + + " as any one of the dimensions of the transformation_matrix [{}]" + .format(tuple(transformation_matrix.size()))) + + if transformation_matrix.device != mean_vector.device: + raise ValueError("Input tensors should be on the same device. Got {} and {}" + .format(transformation_matrix.device, mean_vector.device)) + + self.transformation_matrix = transformation_matrix + self.mean_vector = mean_vector + +
[docs] def forward(self, tensor: Tensor) -> Tensor: + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be whitened. + + Returns: + Tensor: Transformed image. + """ + shape = tensor.shape + n = shape[-3] * shape[-2] * shape[-1] + if n != self.transformation_matrix.shape[0]: + raise ValueError("Input tensor and transformation matrix have incompatible shape." + + "[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) + + "{}".format(self.transformation_matrix.shape[0])) + + if tensor.device.type != self.mean_vector.device.type: + raise ValueError("Input tensor should be on the same device as transformation matrix and mean vector. " + "Got {} vs {}".format(tensor.device, self.mean_vector.device)) + + flat_tensor = tensor.view(-1, n) - self.mean_vector + transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix) + tensor = transformed_tensor.view(shape) + return tensor
+ + def __repr__(self): + format_string = self.__class__.__name__ + '(transformation_matrix=' + format_string += (str(self.transformation_matrix.tolist()) + ')') + format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')') + return format_string
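A rough sketch of how a whitening matrix might be prepared for ``LinearTransformation``. The dataset ``X``, the epsilon, and the image size are dummy assumptions, and the ZCA-style construction below is one common choice rather than the only valid one:

.. code:: python

    import torch
    from torchvision import transforms

    C, H, W = 3, 8, 8
    X = torch.rand(1000, C * H * W)            # flattened "training images", N x D

    mean_vector = X.mean(dim=0)                # [D]
    X_centered = X - mean_vector
    cov = X_centered.t() @ X_centered / X_centered.size(0)
    U, S, _ = torch.svd(cov)
    # ZCA whitening matrix, [D x D]; 1e-5 is a small stabilizing epsilon
    transformation_matrix = U @ torch.diag(1.0 / torch.sqrt(S + 1e-5)) @ U.t()

    whiten = transforms.LinearTransformation(transformation_matrix, mean_vector)
    out = whiten(torch.rand(C, H, W))          # whitened tensor, same (C, H, W) shape
    print(out.shape)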
+ + +
[docs]class ColorJitter(torch.nn.Module): + """Randomly change the brightness, contrast and saturation of an image. + + Args: + brightness (float or tuple of float (min, max)): How much to jitter brightness. + brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness] + or the given [min, max]. Should be non negative numbers. + contrast (float or tuple of float (min, max)): How much to jitter contrast. + contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast] + or the given [min, max]. Should be non negative numbers. + saturation (float or tuple of float (min, max)): How much to jitter saturation. + saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation] + or the given [min, max]. Should be non negative numbers. + hue (float or tuple of float (min, max)): How much to jitter hue. + hue_factor is chosen uniformly from [-hue, hue] or the given [min, max]. + Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5. + """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + super().__init__() + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), + clip_first_on_zero=False) + + @torch.jit.unused + def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError("If {} is a single number, it must be non negative.".format(name)) + value = [center - float(value), center + float(value)] + if clip_first_on_zero: + value[0] = max(value[0], 0.0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError("{} values should be between {}".format(name, bound)) + else: + raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name)) + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) for hue, do nothing + if value[0] == value[1] == center: + value = None + return value + +
[docs] @staticmethod + @torch.jit.unused + def get_params(brightness, contrast, saturation, hue): + """Get a randomized transform to be applied on image. + + Arguments are same as that of __init__. + + Returns: + Transform which randomly adjusts brightness, contrast and + saturation in a random order. + """ + transforms = [] + + if brightness is not None: + brightness_factor = random.uniform(brightness[0], brightness[1]) + transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor))) + + if contrast is not None: + contrast_factor = random.uniform(contrast[0], contrast[1]) + transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor))) + + if saturation is not None: + saturation_factor = random.uniform(saturation[0], saturation[1]) + transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor))) + + if hue is not None: + hue_factor = random.uniform(hue[0], hue[1]) + transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor))) + + random.shuffle(transforms) + transform = Compose(transforms) + + return transform
+ +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Input image. + + Returns: + PIL Image or Tensor: Color jittered image. + """ + fn_idx = torch.randperm(4) + for fn_id in fn_idx: + if fn_id == 0 and self.brightness is not None: + brightness = self.brightness + brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item() + img = F.adjust_brightness(img, brightness_factor) + + if fn_id == 1 and self.contrast is not None: + contrast = self.contrast + contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item() + img = F.adjust_contrast(img, contrast_factor) + + if fn_id == 2 and self.saturation is not None: + saturation = self.saturation + saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item() + img = F.adjust_saturation(img, saturation_factor) + + if fn_id == 3 and self.hue is not None: + hue = self.hue + hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item() + img = F.adjust_hue(img, hue_factor) + + return img
+ + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += 'brightness={0}'.format(self.brightness) + format_string += ', contrast={0}'.format(self.contrast) + format_string += ', saturation={0}'.format(self.saturation) + format_string += ', hue={0})'.format(self.hue) + return format_string
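A hedged ``ColorJitter`` sketch; the jitter strengths are illustrative assumptions, not recommended defaults:

.. code:: python

    import torch
    from torchvision import transforms

    jitter = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)

    img = torch.rand(3, 64, 64)                # dummy RGB tensor image
    out = jitter(img)                          # brightness/contrast/saturation/hue in random order
    print(out.shape)                           # torch.Size([3, 64, 64])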
+ + +
[docs]class RandomRotation(torch.nn.Module): + """Rotate the image by angle. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + degrees (sequence or float or int): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + resample (int, optional): An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST. + If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported. + expand (bool, optional): Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (list or tuple, optional): Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill (n-tuple or int or float): Pixel fill value for area outside the rotated + image. If int or float, the value is used for all bands respectively. + Defaults to 0 for all bands. This option is only available for Pillow>=5.2.0. + This option is not supported for Tensor input. Fill value for the area outside the transform in the output + image is always 0. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__(self, degrees, resample=False, expand=False, center=None, fill=None): + super().__init__() + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, )) + + if center is not None: + _check_sequence_input(center, "center", req_sizes=(2, )) + + self.center = center + + self.resample = resample + self.expand = expand + self.fill = fill + +
[docs] @staticmethod + def get_params(degrees: List[float]) -> float: + """Get parameters for ``rotate`` for a random rotation. + + Returns: + float: angle parameter to be passed to ``rotate`` for random rotation. + """ + angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item()) + return angle
+ +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be rotated. + + Returns: + PIL Image or Tensor: Rotated image. + """ + angle = self.get_params(self.degrees) + return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)
+ + def __repr__(self): + format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees) + format_string += ', resample={0}'.format(self.resample) + format_string += ', expand={0}'.format(self.expand) + if self.center is not None: + format_string += ', center={0}'.format(self.center) + if self.fill is not None: + format_string += ', fill={0}'.format(self.fill) + format_string += ')' + return format_string
+ + +
[docs]class RandomAffine(torch.nn.Module): + """Random affine transformation of the image keeping center invariant. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + degrees (sequence or float or int): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). Set to 0 to deactivate rotations. + translate (tuple, optional): tuple of maximum absolute fraction for horizontal + and vertical translations. For example translate=(a, b), then horizontal shift + is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is + randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default. + scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is + randomly sampled from the range a <= scale <= b. Will keep original scale by default. + shear (sequence or float or int, optional): Range of degrees to select from. + If shear is a number, a shear parallel to the x axis in the range (-shear, +shear) + will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the + range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values, + a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + Will not apply shear by default. + resample (int, optional): An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``. + If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported. + fillcolor (tuple or int): Optional fill color (Tuple for RGB Image and int for grayscale) for the area + outside the transform in the output image (Pillow>=5.0.0). This option is not supported for Tensor + input. Fill value for the area outside the transform in the output image is always 0. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__(self, degrees, translate=None, scale=None, shear=None, resample=0, fillcolor=0): + super().__init__() + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, )) + + if translate is not None: + _check_sequence_input(translate, "translate", req_sizes=(2, )) + for t in translate: + if not (0.0 <= t <= 1.0): + raise ValueError("translation values should be between 0 and 1") + self.translate = translate + + if scale is not None: + _check_sequence_input(scale, "scale", req_sizes=(2, )) + for s in scale: + if s <= 0: + raise ValueError("scale values should be positive") + self.scale = scale + + if shear is not None: + self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4)) + else: + self.shear = shear + + self.resample = resample + self.fillcolor = fillcolor + +
[docs] @staticmethod + def get_params( + degrees: List[float], + translate: Optional[List[float]], + scale_ranges: Optional[List[float]], + shears: Optional[List[float]], + img_size: List[int] + ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]: + """Get parameters for affine transformation + + Returns: + params to be passed to the affine transformation + """ + angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item()) + if translate is not None: + max_dx = float(translate[0] * img_size[0]) + max_dy = float(translate[1] * img_size[1]) + tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item())) + ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item())) + translations = (tx, ty) + else: + translations = (0, 0) + + if scale_ranges is not None: + scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item()) + else: + scale = 1.0 + + shear_x = shear_y = 0.0 + if shears is not None: + shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item()) + if len(shears) == 4: + shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item()) + + shear = (shear_x, shear_y) + + return angle, translations, scale, shear
+ +
[docs] def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Affine transformed image. + """ + + img_size = F._get_image_size(img) + + ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size) + return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)
+ + def __repr__(self): + s = '{name}(degrees={degrees}' + if self.translate is not None: + s += ', translate={translate}' + if self.scale is not None: + s += ', scale={scale}' + if self.shear is not None: + s += ', shear={shear}' + if self.resample > 0: + s += ', resample={resample}' + if self.fillcolor != 0: + s += ', fillcolor={fillcolor}' + s += ')' + d = dict(self.__dict__) + d['resample'] = _pil_interpolation_to_str[d['resample']] + return s.format(name=self.__class__.__name__, **d)
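A brief ``RandomAffine`` sketch; the degree, translation, scale, and shear ranges below are illustrative assumptions for a mild augmentation policy:

.. code:: python

    import torch
    from torchvision import transforms

    affine = transforms.RandomAffine(degrees=15, translate=(0.1, 0.1),
                                     scale=(0.9, 1.1), shear=5)

    img = torch.rand(3, 64, 64)                # dummy tensor image
    print(affine(img).shape)                   # torch.Size([3, 64, 64]); content is rotated/shifted/scaled/sheared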
+ + +
[docs]class Grayscale(torch.nn.Module): + """Convert image to grayscale. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + num_output_channels (int): (1 or 3) number of channels desired for output image + + Returns: + PIL Image: Grayscale version of the input. + - If ``num_output_channels == 1`` : returned image is single channel + - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b + + """ + + def __init__(self, num_output_channels=1): + super().__init__() + self.num_output_channels = num_output_channels + +
[docs] def forward(self, img: Tensor) -> Tensor: + """ + Args: + img (PIL Image or Tensor): Image to be converted to grayscale. + + Returns: + PIL Image or Tensor: Grayscaled image. + """ + return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
+ + def __repr__(self): + return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
+ + +
[docs]class RandomGrayscale(torch.nn.Module): + """Randomly convert image to grayscale with a probability of p (default 0.1). + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + p (float): probability that image should be converted to grayscale. + + Returns: + PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged + with probability (1-p). + - If input image is 1 channel: grayscale version is 1 channel + - If input image is 3 channel: grayscale version is 3 channel with r == g == b + + """ + + def __init__(self, p=0.1): + super().__init__() + self.p = p + +
[docs] def forward(self, img: Tensor) -> Tensor: + """ + Args: + img (PIL Image or Tensor): Image to be converted to grayscale. + + Returns: + PIL Image or Tensor: Randomly grayscaled image. + """ + num_output_channels = F._get_image_num_channels(img) + if torch.rand(1) < self.p: + return F.rgb_to_grayscale(img, num_output_channels=num_output_channels) + return img
+ + def __repr__(self): + return self.__class__.__name__ + '(p={0})'.format(self.p)
+ + +
[docs]class RandomErasing(torch.nn.Module): + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896 + + Args: + p: probability that the random erasing operation will be performed. + scale: range of proportion of erased area against input image. + ratio: range of aspect ratio of erased area. + value: erasing value. Default is 0. If a single int, it is used to + erase all pixels. If a tuple of length 3, it is used to erase + R, G, B channels respectively. + If a str of 'random', erasing each pixel with random values. + inplace: boolean to make this transform inplace. Default set to False. + + Returns: + Erased Image. + + # Examples: + >>> transform = transforms.Compose([ + >>> transforms.RandomHorizontalFlip(), + >>> transforms.ToTensor(), + >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + >>> transforms.RandomErasing(), + >>> ]) + """ + + def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): + super().__init__() + if not isinstance(value, (numbers.Number, str, tuple, list)): + raise TypeError("Argument value should be either a number or str or a sequence") + if isinstance(value, str) and value != "random": + raise ValueError("If value is str, it should be 'random'") + if not isinstance(scale, (tuple, list)): + raise TypeError("Scale should be a sequence") + if not isinstance(ratio, (tuple, list)): + raise TypeError("Ratio should be a sequence") + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("Scale and ratio should be of kind (min, max)") + if scale[0] < 0 or scale[1] > 1: + raise ValueError("Scale should be between 0 and 1") + if p < 0 or p > 1: + raise ValueError("Random erasing probability should be between 0 and 1") + + self.p = p + self.scale = scale + self.ratio = ratio + self.value = value + self.inplace = inplace + +
[docs] @staticmethod + def get_params( + img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None + ) -> Tuple[int, int, int, int, Tensor]: + """Get parameters for ``erase`` for a random erasing. + + Args: + img (Tensor): Tensor image of size (C, H, W) to be erased. + scale (tuple or list): range of proportion of erased area against input image. + ratio (tuple or list): range of aspect ratio of erased area. + value (list, optional): erasing value. If None, it is interpreted as "random" + (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number, + i.e. ``value[0]``. + + Returns: + tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing. + """ + img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1] + area = img_h * img_w + + for _ in range(10): + erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item() + + h = int(round(math.sqrt(erase_area * aspect_ratio))) + w = int(round(math.sqrt(erase_area / aspect_ratio))) + if not (h < img_h and w < img_w): + continue + + if value is None: + v = torch.empty([img_c, h, w], dtype=torch.float32).normal_() + else: + v = torch.tensor(value)[:, None, None] + + i = torch.randint(0, img_h - h + 1, size=(1, )).item() + j = torch.randint(0, img_w - w + 1, size=(1, )).item() + return i, j, h, w, v + + # Return original image + return 0, 0, img_h, img_w, img
+ +
[docs] def forward(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W) to be erased. + + Returns: + img (Tensor): Erased Tensor image. + """ + if torch.rand(1) < self.p: + + # cast self.value to script acceptable type + if isinstance(self.value, (int, float)): + value = [self.value, ] + elif isinstance(self.value, str): + value = None + elif isinstance(self.value, tuple): + value = list(self.value) + else: + value = self.value + + if value is not None and not (len(value) in (1, img.shape[-3])): + raise ValueError( + "If value is a sequence, it should have either a single value or " + "{} (number of input channels)".format(img.shape[-3]) + ) + + x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value) + return F.erase(img, x, y, h, w, v, self.inplace) + return img
+ + +
[docs]class GaussianBlur(torch.nn.Module): + """Blurs image with randomly chosen Gaussian blur. + The image can be a PIL Image or a Tensor, in which case it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + kernel_size (int or sequence): Size of the Gaussian kernel. + sigma (float or tuple of float (min, max)): Standard deviation to be used for + creating kernel to perform blurring. If float, sigma is fixed. If it is tuple + of float (min, max), sigma is chosen uniformly at random to lie in the + given range. + + Returns: + PIL Image or Tensor: Gaussian blurred version of the input image. + + """ + + def __init__(self, kernel_size, sigma=(0.1, 2.0)): + super().__init__() + self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers") + for ks in self.kernel_size: + if ks <= 0 or ks % 2 == 0: + raise ValueError("Kernel size value should be an odd and positive number.") + + if isinstance(sigma, numbers.Number): + if sigma <= 0: + raise ValueError("If sigma is a single number, it must be positive.") + sigma = (sigma, sigma) + elif isinstance(sigma, Sequence) and len(sigma) == 2: + if not 0. < sigma[0] <= sigma[1]: + raise ValueError("sigma values should be positive and of the form (min, max).") + else: + raise ValueError("sigma should be a single number or a list/tuple with length 2.") + + self.sigma = sigma + +
[docs] @staticmethod + def get_params(sigma_min: float, sigma_max: float) -> float: + """Choose sigma for ``gaussian_blur`` for random gaussian blurring. + + Args: + sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel. + sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel. + + Returns: + float: Standard deviation to be passed to calculate kernel for gaussian blurring. + """ + return torch.empty(1).uniform_(sigma_min, sigma_max).item()
+ +
[docs] def forward(self, img: Tensor) -> Tensor: + """ + Args: + img (PIL Image or Tensor): image of size (C, H, W) to be blurred. + + Returns: + PIL Image or Tensor: Gaussian blurred image + """ + sigma = self.get_params(self.sigma[0], self.sigma[1]) + return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
+ + def __repr__(self): + s = '(kernel_size={}, '.format(self.kernel_size) + s += 'sigma={})'.format(self.sigma) + return self.__class__.__name__ + s
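A minimal ``GaussianBlur`` sketch; the kernel size is an illustrative assumption and the sigma range repeats the class default:

.. code:: python

    import torch
    from torchvision import transforms

    # Kernel size must be odd and positive
    blur = transforms.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))

    img = torch.rand(3, 64, 64)                # dummy tensor image
    print(blur(img).shape)                     # torch.Size([3, 64, 64])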
+ + +def _setup_size(size, error_msg): + if isinstance(size, numbers.Number): + return int(size), int(size) + + if isinstance(size, Sequence) and len(size) == 1: + return size[0], size[0] + + if len(size) != 2: + raise ValueError(error_msg) + + return size + + +def _check_sequence_input(x, name, req_sizes): + msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes]) + if not isinstance(x, Sequence): + raise TypeError("{} should be a sequence of length {}.".format(name, msg)) + if len(x) not in req_sizes: + raise ValueError("{} should be sequence of length {}.".format(name, msg)) + + +def _setup_angle(x, name, req_sizes=(2, )): + if isinstance(x, numbers.Number): + if x < 0: + raise ValueError("If {} is a single number, it must be positive.".format(name)) + x = [-x, x] + else: + _check_sequence_input(x, name, req_sizes) + + return [float(d) for d in x] +
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/_modules/torchvision/utils.html b/docs/1.7.0/torchvision/_modules/torchvision/utils.html
new file mode 100644
index 000000000000..a19d5cac9afb
--- /dev/null
+++ b/docs/1.7.0/torchvision/_modules/torchvision/utils.html
@@ -0,0 +1,682 @@
+ torchvision.utils — Torchvision 0.8.0.dev20201012 documentation
Source code for torchvision.utils

+from typing import Union, Optional, List, Tuple, Text, BinaryIO
+import io
+import pathlib
+import torch
+import math
+irange = range
+
+
+
[docs]def make_grid( + tensor: Union[torch.Tensor, List[torch.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, +) -> torch.Tensor: + """Make a grid of images. + + Args: + tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) + or a list of images all of the same size. + nrow (int, optional): Number of images displayed in each row of the grid. + The final grid size is ``(B / nrow, nrow)``. Default: ``8``. + padding (int, optional): amount of padding. Default: ``2``. + normalize (bool, optional): If True, shift the image to the range (0, 1), + by the min and max values specified by :attr:`range`. Default: ``False``. + range (tuple, optional): tuple (min, max) where min and max are numbers, + then these numbers are used to normalize the image. By default, min and max + are computed from the tensor. + scale_each (bool, optional): If ``True``, scale each image in the batch of + images separately rather than the (min, max) over all images. Default: ``False``. + pad_value (float, optional): Value for the padded pixels. Default: ``0``. + + Example: + See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_ + + """ + if not (torch.is_tensor(tensor) or + (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = torch.stack(tensor, dim=0) + + if tensor.dim() == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.dim() == 3: # single image + if tensor.size(0) == 1: # if single-channel, convert to 3-channel + tensor = torch.cat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + + if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images + tensor = torch.cat((tensor, tensor, tensor), 1) + + if normalize is True: + tensor = tensor.clone() # avoid modifying tensor in-place + if range is not None: + assert isinstance(range, tuple), \ + "range has to be a tuple (min, max) if specified. min and max are numbers" + + def norm_ip(img, min, max): + img.clamp_(min=min, max=max) + img.add_(-min).div_(max - min + 1e-5) + + def norm_range(t, range): + if range is not None: + norm_ip(t, range[0], range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, range) + else: + norm_range(tensor, range) + + if tensor.size(0) == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.size(0) + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) + num_channels = tensor.size(1) + grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value) + k = 0 + for y in irange(ymaps): + for x in irange(xmaps): + if k >= nmaps: + break + # Tensor.copy_() is a valid method but seems to be missing from the stubs + # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_ + grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined] + 2, x * width + padding, width - padding + ).copy_(tensor[k]) + k = k + 1 + return grid
+ + +
[docs]def save_image( + tensor: Union[torch.Tensor, List[torch.Tensor]], + fp: Union[Text, pathlib.Path, BinaryIO], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, + format: Optional[str] = None, +) -> None: + """Save a given Tensor into an image file. + + Args: + tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, + saves the tensor as a grid of images by calling ``make_grid``. + fp (string or file object): A filename or a file object + format(Optional): If omitted, the format to use is determined from the filename extension. + If a file object was used instead of a filename, this parameter should always be used. + **kwargs: Other arguments are documented in ``make_grid``. + """ + from PIL import Image + grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, + normalize=normalize, range=range, scale_each=scale_each) + # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy() + im = Image.fromarray(ndarr) + im.save(fp, format=format)
+
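A hedged usage sketch for ``make_grid`` and ``save_image``; the batch shape, grid layout, and the ``grid.png`` file name are illustrative assumptions:

.. code:: python

    import torch
    from torchvision.utils import make_grid, save_image

    batch = torch.rand(16, 3, 32, 32)          # dummy mini-batch, B x C x H x W

    grid = make_grid(batch, nrow=4, padding=2)
    print(grid.shape)                          # torch.Size([3, 138, 138])

    # save_image builds the same grid internally and writes it to disk
    save_image(batch, "grid.png", nrow=4, padding=2)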
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_sources/datasets.rst.txt b/docs/1.7.0/torchvision/_sources/datasets.rst.txt new file mode 100644 index 000000000000..fa04aded1eae --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/datasets.rst.txt @@ -0,0 +1,233 @@ +torchvision.datasets +==================== + +All datasets are subclasses of :class:`torch.utils.data.Dataset` +i.e, they have ``__getitem__`` and ``__len__`` methods implemented. +Hence, they can all be passed to a :class:`torch.utils.data.DataLoader` +which can load multiple samples parallelly using ``torch.multiprocessing`` workers. +For example: :: + + imagenet_data = torchvision.datasets.ImageNet('path/to/imagenet_root/') + data_loader = torch.utils.data.DataLoader(imagenet_data, + batch_size=4, + shuffle=True, + num_workers=args.nThreads) + +The following datasets are available: + +.. contents:: Datasets + :local: + +All the datasets have almost similar API. They all have two common arguments: +``transform`` and ``target_transform`` to transform the input and target respectively. + + +.. currentmodule:: torchvision.datasets + +CelebA +~~~~~~ + +.. autoclass:: CelebA + :members: __getitem__ + :special-members: + +CIFAR +~~~~~ + +.. autoclass:: CIFAR10 + :members: __getitem__ + :special-members: + +.. autoclass:: CIFAR100 + +Cityscapes +~~~~~~~~~~ + +.. note :: + Requires Cityscape to be downloaded. + +.. autoclass:: Cityscapes + :members: __getitem__ + :special-members: + +COCO +~~~~ + +.. note :: + These require the `COCO API to be installed`_ + +.. _COCO API to be installed: https://github.com/pdollar/coco/tree/master/PythonAPI + + +Captions +^^^^^^^^ + +.. autoclass:: CocoCaptions + :members: __getitem__ + :special-members: + + +Detection +^^^^^^^^^ + +.. autoclass:: CocoDetection + :members: __getitem__ + :special-members: + +DatasetFolder +~~~~~~~~~~~~~ + +.. autoclass:: DatasetFolder + :members: __getitem__ + :special-members: + + +EMNIST +~~~~~~ + +.. autoclass:: EMNIST + +FakeData +~~~~~~~~ + +.. autoclass:: FakeData + +Fashion-MNIST +~~~~~~~~~~~~~ + +.. autoclass:: FashionMNIST + +Flickr +~~~~~~ + +.. autoclass:: Flickr8k + :members: __getitem__ + :special-members: + +.. autoclass:: Flickr30k + :members: __getitem__ + :special-members: + +HMDB51 +~~~~~~~ + +.. autoclass:: HMDB51 + :members: __getitem__ + :special-members: + +ImageFolder +~~~~~~~~~~~ + +.. autoclass:: ImageFolder + :members: __getitem__ + :special-members: + +ImageNet +~~~~~~~~~~~ + +.. autoclass:: ImageNet + +.. note :: + This requires `scipy` to be installed + +Kinetics-400 +~~~~~~~~~~~~ + +.. autoclass:: Kinetics400 + :members: __getitem__ + :special-members: + +KMNIST +~~~~~~~~~~~~~ + +.. autoclass:: KMNIST + +LSUN +~~~~ + +.. autoclass:: LSUN + :members: __getitem__ + :special-members: + +MNIST +~~~~~ + +.. autoclass:: MNIST + +Omniglot +~~~~~~ + +.. autoclass:: Omniglot + +PhotoTour +~~~~~~~~~ + +.. autoclass:: PhotoTour + :members: __getitem__ + :special-members: + +Places365 +~~~~~~~~~ + +.. autoclass:: Places365 + :members: __getitem__ + :special-members: + +QMNIST +~~~~~~ + +.. autoclass:: QMNIST + +SBD +~~~~~~ + +.. autoclass:: SBDataset + :members: __getitem__ + :special-members: + +SBU +~~~ + +.. autoclass:: SBU + :members: __getitem__ + :special-members: + +STL10 +~~~~~ + +.. autoclass:: STL10 + :members: __getitem__ + :special-members: + +SVHN +~~~~~ + +.. autoclass:: SVHN + :members: __getitem__ + :special-members: + +UCF101 +~~~~~~~ + +.. 
autoclass:: UCF101 + :members: __getitem__ + :special-members: + +USPS +~~~~~ + +.. autoclass:: USPS + :members: __getitem__ + :special-members: + +VOC +~~~~~~ + +.. autoclass:: VOCSegmentation + :members: __getitem__ + :special-members: + +.. autoclass:: VOCDetection + :members: __getitem__ + :special-members: + diff --git a/docs/1.7.0/torchvision/_sources/index.rst.txt b/docs/1.7.0/torchvision/_sources/index.rst.txt new file mode 100644 index 000000000000..288f14d6a21b --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/index.rst.txt @@ -0,0 +1,54 @@ +torchvision +=========== +This library is part of the `PyTorch +`_ project. PyTorch is an open source +machine learning framework. + +Features described in this documentation are classified by release status: + + *Stable:* These features will be maintained long-term and there should generally + be no major performance limitations or gaps in documentation. + We also expect to maintain backwards compatibility (although + breaking changes can happen and notice will be given one release ahead + of time). + + *Beta:* Features are tagged as Beta because the API may change based on + user feedback, because the performance needs to improve, or because + coverage across operators is not yet complete. For Beta features, we are + committing to seeing the feature through to the Stable classification. + We are not, however, committing to backwards compatibility. + + *Prototype:* These features are typically not available as part of + binary distributions like PyPI or Conda, except sometimes behind run-time + flags, and are at an early stage for feedback and testing. + + + +The :mod:`torchvision` package consists of popular datasets, model +architectures, and common image transformations for computer vision. + +.. toctree:: + :maxdepth: 2 + :caption: Package Reference + + datasets + io + models + ops + transforms + utils + +.. automodule:: torchvision + :members: + +.. toctree:: + :maxdepth: 1 + :caption: PyTorch Libraries + + PyTorch + torchaudio + torchtext + torchvision + TorchElastic + TorchServe + PyTorch on XLA Devices diff --git a/docs/1.7.0/torchvision/_sources/io.rst.txt b/docs/1.7.0/torchvision/_sources/io.rst.txt new file mode 100644 index 000000000000..9159b82f625b --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/io.rst.txt @@ -0,0 +1,72 @@ +torchvision.io +============== + +.. currentmodule:: torchvision.io + +The :mod:`torchvision.io` package provides functions for performing IO +operations. They are currently specific to reading and writing video and +images. + +Video +----- + +.. autofunction:: read_video + +.. autofunction:: read_video_timestamps + +.. autofunction:: write_video + + +Fine-grained video API +------------------- + +In addition to the :mod:`read_video` function, we provide a high-performance +lower-level API for more fine-grained control compared to the :mod:`read_video` function. +It does all this whilst fully supporting torchscript. + +.. autoclass:: VideoReader + :members: __next__, get_metadata, set_current_stream, seek + + +Example of inspecting a video: + +.. code:: python + + import torchvision + video_path = "path to a test video" + # Constructor allocates memory and a threaded decoder + # instance per video. At the momet it takes two arguments: + # path to the video file, and a wanted stream. + reader = torchvision.io.VideoReader(video_path, "video") + + # The information about the video can be retrieved using the + # `get_metadata()` method. 
It returns a dictionary for every stream, with + # duration and other relevant metadata (often frame rate) + reader_md = reader.get_metadata() + + # metadata is structured as a dict of dicts with following structure + # {"stream_type": {"attribute": [attribute per stream]}} + # + # following would print out the list of frame rates for every present video stream + print(reader_md["video"]["fps"]) + + # we explicitly select the stream we would like to operate on. In + # the constructor we select a default video stream, but + # in practice, we can set whichever stream we would like + video.set_current_stream("video:0") + + +Image +----- + +.. autofunction:: read_image + +.. autofunction:: decode_image + +.. autofunction:: encode_jpeg + +.. autofunction:: write_jpeg + +.. autofunction:: encode_png + +.. autofunction:: write_png diff --git a/docs/1.7.0/torchvision/_sources/models.rst.txt b/docs/1.7.0/torchvision/_sources/models.rst.txt new file mode 100644 index 000000000000..66ebf0e211d4 --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/models.rst.txt @@ -0,0 +1,485 @@ +torchvision.models +################## + + +The models subpackage contains definitions of models for addressing +different tasks, including: image classification, pixelwise semantic +segmentation, object detection, instance segmentation, person +keypoint detection and video classification. + + +Classification +============== + +The models subpackage contains definitions for the following model +architectures for image classification: + +- `AlexNet`_ +- `VGG`_ +- `ResNet`_ +- `SqueezeNet`_ +- `DenseNet`_ +- `Inception`_ v3 +- `GoogLeNet`_ +- `ShuffleNet`_ v2 +- `MobileNet`_ v2 +- `ResNeXt`_ +- `Wide ResNet`_ +- `MNASNet`_ + +You can construct a model with random weights by calling its constructor: + +.. code:: python + + import torchvision.models as models + resnet18 = models.resnet18() + alexnet = models.alexnet() + vgg16 = models.vgg16() + squeezenet = models.squeezenet1_0() + densenet = models.densenet161() + inception = models.inception_v3() + googlenet = models.googlenet() + shufflenet = models.shufflenet_v2_x1_0() + mobilenet = models.mobilenet_v2() + resnext50_32x4d = models.resnext50_32x4d() + wide_resnet50_2 = models.wide_resnet50_2() + mnasnet = models.mnasnet1_0() + +We provide pre-trained models, using the PyTorch :mod:`torch.utils.model_zoo`. +These can be constructed by passing ``pretrained=True``: + +.. code:: python + + import torchvision.models as models + resnet18 = models.resnet18(pretrained=True) + alexnet = models.alexnet(pretrained=True) + squeezenet = models.squeezenet1_0(pretrained=True) + vgg16 = models.vgg16(pretrained=True) + densenet = models.densenet161(pretrained=True) + inception = models.inception_v3(pretrained=True) + googlenet = models.googlenet(pretrained=True) + shufflenet = models.shufflenet_v2_x1_0(pretrained=True) + mobilenet = models.mobilenet_v2(pretrained=True) + resnext50_32x4d = models.resnext50_32x4d(pretrained=True) + wide_resnet50_2 = models.wide_resnet50_2(pretrained=True) + mnasnet = models.mnasnet1_0(pretrained=True) + +Instancing a pre-trained model will download its weights to a cache directory. +This directory can be set using the `TORCH_MODEL_ZOO` environment variable. See +:func:`torch.utils.model_zoo.load_url` for details. + +Some models use modules which have different training and evaluation +behavior, such as batch normalization. To switch between these modes, use +``model.train()`` or ``model.eval()`` as appropriate. 
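+A minimal sketch of this pattern, using ``resnet18`` from the constructors
+above; the random input tensor is purely illustrative and stands in for a
+normalized image batch:
+
+.. code:: python
+
+    import torch
+    import torchvision.models as models
+
+    model = models.resnet18(pretrained=True)
+    model.eval()  # switch batch norm (and dropout) to evaluation behavior
+    with torch.no_grad():
+        output = model(torch.rand(1, 3, 224, 224))  # illustrative random batch
+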
See +:meth:`~torch.nn.Module.train` or :meth:`~torch.nn.Module.eval` for details. + +All pre-trained models expect input images normalized in the same way, +i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), +where H and W are expected to be at least 224. +The images have to be loaded in to a range of [0, 1] and then normalized +using ``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``. +You can use the following transform to normalize:: + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + +An example of such normalization can be found in the imagenet example +`here `_ + +The process for obtaining the values of `mean` and `std` is roughly equivalent +to:: + + import torch + from torchvision import datasets, transforms as T + + transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()]) + dataset = datasets.ImageNet(".", split="train", transform=transform) + + means = [] + stds = [] + for img in subset(dataset): + means.append(torch.mean(img)) + stds.append(torch.std(img)) + + mean = torch.mean(torch.tensor(means)) + std = torch.mean(torch.tensor(stds)) + +Unfortunately, the concrete `subset` that was used is lost. For more +information see `this discussion `_ +or `these experiments `_. + +ImageNet 1-crop error rates (224x224) + +================================ ============= ============= +Network Top-1 error Top-5 error +================================ ============= ============= +AlexNet 43.45 20.91 +VGG-11 30.98 11.37 +VGG-13 30.07 10.75 +VGG-16 28.41 9.62 +VGG-19 27.62 9.12 +VGG-11 with batch normalization 29.62 10.19 +VGG-13 with batch normalization 28.45 9.63 +VGG-16 with batch normalization 26.63 8.50 +VGG-19 with batch normalization 25.76 8.15 +ResNet-18 30.24 10.92 +ResNet-34 26.70 8.58 +ResNet-50 23.85 7.13 +ResNet-101 22.63 6.44 +ResNet-152 21.69 5.94 +SqueezeNet 1.0 41.90 19.58 +SqueezeNet 1.1 41.81 19.38 +Densenet-121 25.35 7.83 +Densenet-169 24.00 7.00 +Densenet-201 22.80 6.43 +Densenet-161 22.35 6.20 +Inception v3 22.55 6.44 +GoogleNet 30.22 10.47 +ShuffleNet V2 30.64 11.68 +MobileNet V2 28.12 9.71 +ResNeXt-50-32x4d 22.38 6.30 +ResNeXt-101-32x8d 20.69 5.47 +Wide ResNet-50-2 21.49 5.91 +Wide ResNet-101-2 21.16 5.72 +MNASNet 1.0 26.49 8.456 +================================ ============= ============= + + +.. _AlexNet: https://arxiv.org/abs/1404.5997 +.. _VGG: https://arxiv.org/abs/1409.1556 +.. _ResNet: https://arxiv.org/abs/1512.03385 +.. _SqueezeNet: https://arxiv.org/abs/1602.07360 +.. _DenseNet: https://arxiv.org/abs/1608.06993 +.. _Inception: https://arxiv.org/abs/1512.00567 +.. _GoogLeNet: https://arxiv.org/abs/1409.4842 +.. _ShuffleNet: https://arxiv.org/abs/1807.11164 +.. _MobileNet: https://arxiv.org/abs/1801.04381 +.. _ResNeXt: https://arxiv.org/abs/1611.05431 +.. _MNASNet: https://arxiv.org/abs/1807.11626 + +.. currentmodule:: torchvision.models + +Alexnet +------- + +.. autofunction:: alexnet + +VGG +--- + +.. autofunction:: vgg11 +.. autofunction:: vgg11_bn +.. autofunction:: vgg13 +.. autofunction:: vgg13_bn +.. autofunction:: vgg16 +.. autofunction:: vgg16_bn +.. autofunction:: vgg19 +.. autofunction:: vgg19_bn + + +ResNet +------ + +.. autofunction:: resnet18 +.. autofunction:: resnet34 +.. autofunction:: resnet50 +.. autofunction:: resnet101 +.. autofunction:: resnet152 + +SqueezeNet +---------- + +.. autofunction:: squeezenet1_0 +.. autofunction:: squeezenet1_1 + +DenseNet +--------- + +.. autofunction:: densenet121 +.. autofunction:: densenet169 +.. 
autofunction:: densenet161 +.. autofunction:: densenet201 + +Inception v3 +------------ + +.. autofunction:: inception_v3 + +.. note :: + This requires `scipy` to be installed + + +GoogLeNet +------------ + +.. autofunction:: googlenet + +.. note :: + This requires `scipy` to be installed + + +ShuffleNet v2 +------------- + +.. autofunction:: shufflenet_v2_x0_5 +.. autofunction:: shufflenet_v2_x1_0 +.. autofunction:: shufflenet_v2_x1_5 +.. autofunction:: shufflenet_v2_x2_0 + +MobileNet v2 +------------- + +.. autofunction:: mobilenet_v2 + +ResNext +------- + +.. autofunction:: resnext50_32x4d +.. autofunction:: resnext101_32x8d + +Wide ResNet +----------- + +.. autofunction:: wide_resnet50_2 +.. autofunction:: wide_resnet101_2 + +MNASNet +-------- + +.. autofunction:: mnasnet0_5 +.. autofunction:: mnasnet0_75 +.. autofunction:: mnasnet1_0 +.. autofunction:: mnasnet1_3 + + +Semantic Segmentation +===================== + +The models subpackage contains definitions for the following model +architectures for semantic segmentation: + +- `FCN ResNet50, ResNet101 `_ +- `DeepLabV3 ResNet50, ResNet101 `_ + +As with image classification models, all pre-trained models expect input images normalized in the same way. +The images have to be loaded in to a range of ``[0, 1]`` and then normalized using +``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``. +They have been trained on images resized such that their minimum size is 520. + +The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are +present in the Pascal VOC dataset. You can see more information on how the subset has been selected in +``references/segmentation/coco_utils.py``. The classes that the pre-trained model outputs are the following, +in order: + + .. code-block:: python + + ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', + 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] + +The accuracies of the pre-trained models evaluated on COCO val2017 are as follows + +================================ ============= ==================== +Network mean IoU global pixelwise acc +================================ ============= ==================== +FCN ResNet50 60.5 91.4 +FCN ResNet101 63.7 91.9 +DeepLabV3 ResNet50 66.4 92.4 +DeepLabV3 ResNet101 67.4 92.4 +================================ ============= ==================== + + +Fully Convolutional Networks +---------------------------- + +.. autofunction:: torchvision.models.segmentation.fcn_resnet50 +.. autofunction:: torchvision.models.segmentation.fcn_resnet101 + + +DeepLabV3 +--------- + +.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet50 +.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet101 + + +Object Detection, Instance Segmentation and Person Keypoint Detection +===================================================================== + +The models subpackage contains definitions for the following model +architectures for detection: + +- `Faster R-CNN ResNet-50 FPN `_ +- `Mask R-CNN ResNet-50 FPN `_ + +The pre-trained models for detection, instance segmentation and +keypoint detection are initialized with the classification models +in torchvision. + +The models expect a list of ``Tensor[C, H, W]``, in the range ``0-1``. +The models internally resize the images so that they have a minimum size +of ``800``. 
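+A minimal sketch of the expected input format, using
+``fasterrcnn_resnet50_fpn`` (documented below); the random tensors are purely
+illustrative stand-ins for real images:
+
+.. code:: python
+
+    import torch
+    from torchvision.models.detection import fasterrcnn_resnet50_fpn
+
+    model = fasterrcnn_resnet50_fpn(pretrained=True)
+    model.eval()
+    # a list of 3-channel images in [0, 1]; sizes may differ between images,
+    # and the model resizes them internally to a minimum size of 800
+    images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+    with torch.no_grad():
+        predictions = model(images)  # one dict per image: boxes, labels, scores
+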
This option can be changed by passing the option ``min_size`` +to the constructor of the models. + + +For object detection and instance segmentation, the pre-trained +models return the predictions of the following classes: + + .. code-block:: python + + COCO_INSTANCE_CATEGORY_NAMES = [ + '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', + 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', + 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', + 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', + 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' + ] + + +Here are the summary of the accuracies for the models trained on +the instances set of COCO train2017 and evaluated on COCO val2017. + +================================ ======= ======== =========== +Network box AP mask AP keypoint AP +================================ ======= ======== =========== +Faster R-CNN ResNet-50 FPN 37.0 - - +RetinaNet ResNet-50 FPN 36.4 - - +Mask R-CNN ResNet-50 FPN 37.9 34.6 - +================================ ======= ======== =========== + +For person keypoint detection, the accuracies for the pre-trained +models are as follows + +================================ ======= ======== =========== +Network box AP mask AP keypoint AP +================================ ======= ======== =========== +Keypoint R-CNN ResNet-50 FPN 54.6 - 65.0 +================================ ======= ======== =========== + +For person keypoint detection, the pre-trained model return the +keypoints in the following order: + + .. code-block:: python + + COCO_PERSON_KEYPOINT_NAMES = [ + 'nose', + 'left_eye', + 'right_eye', + 'left_ear', + 'right_ear', + 'left_shoulder', + 'right_shoulder', + 'left_elbow', + 'right_elbow', + 'left_wrist', + 'right_wrist', + 'left_hip', + 'right_hip', + 'left_knee', + 'right_knee', + 'left_ankle', + 'right_ankle' + ] + +Runtime characteristics +----------------------- + +The implementations of the models for object detection, instance segmentation +and keypoint detection are efficient. + +In the following table, we use 8 V100 GPUs, with CUDA 10.0 and CUDNN 7.4 to +report the results. During training, we use a batch size of 2 per GPU, and +during testing a batch size of 1 is used. + +For test time, we report the time for the model evaluation and postprocessing +(including mask pasting in image), but not the time for computing the +precision-recall. 
+ +============================== =================== ================== =========== +Network train time (s / it) test time (s / it) memory (GB) +============================== =================== ================== =========== +Faster R-CNN ResNet-50 FPN 0.2288 0.0590 5.2 +RetinaNet ResNet-50 FPN 0.2514 0.0939 4.1 +Mask R-CNN ResNet-50 FPN 0.2728 0.0903 5.4 +Keypoint R-CNN ResNet-50 FPN 0.3789 0.1242 6.8 +============================== =================== ================== =========== + + +Faster R-CNN +------------ + +.. autofunction:: torchvision.models.detection.fasterrcnn_resnet50_fpn + + +RetinaNet +------------ + +.. autofunction:: torchvision.models.detection.retinanet_resnet50_fpn + + +Mask R-CNN +---------- + +.. autofunction:: torchvision.models.detection.maskrcnn_resnet50_fpn + + +Keypoint R-CNN +-------------- + +.. autofunction:: torchvision.models.detection.keypointrcnn_resnet50_fpn + + +Video classification +==================== + +We provide models for action recognition pre-trained on Kinetics-400. +They have all been trained with the scripts provided in ``references/video_classification``. + +All pre-trained models expect input images normalized in the same way, +i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W), +where H and W are expected to be 112, and T is a number of video frames in a clip. +The images have to be loaded in to a range of [0, 1] and then normalized +using ``mean = [0.43216, 0.394666, 0.37645]`` and ``std = [0.22803, 0.22145, 0.216989]``. + + +.. note:: + The normalization parameters are different from the image classification ones, and correspond + to the mean and std from Kinetics-400. + +.. note:: + For now, normalization code can be found in ``references/video_classification/transforms.py``, + see the ``Normalize`` function there. Note that it differs from standard normalization for + images because it assumes the video is 4d. + +Kinetics 1-crop accuracies for clip length 16 (16x112x112) + +================================ ============= ============= +Network Clip acc@1 Clip acc@5 +================================ ============= ============= +ResNet 3D 18 52.75 75.45 +ResNet MC 18 53.90 76.29 +ResNet (2+1)D 57.50 78.81 +================================ ============= ============= + + +ResNet 3D +---------- + +.. autofunction:: torchvision.models.video.r3d_18 + +ResNet Mixed Convolution +------------------------ + +.. autofunction:: torchvision.models.video.mc3_18 + +ResNet (2+1)D +------------- + +.. autofunction:: torchvision.models.video.r2plus1d_18 diff --git a/docs/1.7.0/torchvision/_sources/ops.rst.txt b/docs/1.7.0/torchvision/_sources/ops.rst.txt new file mode 100644 index 000000000000..6b3b24510099 --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/ops.rst.txt @@ -0,0 +1,32 @@ +torchvision.ops +=============== + +.. currentmodule:: torchvision.ops + +:mod:`torchvision.ops` implements operators that are specific for Computer Vision. + +.. note:: + All operators have native support for TorchScript. + + +.. autofunction:: nms +.. autofunction:: batched_nms +.. autofunction:: remove_small_boxes +.. autofunction:: clip_boxes_to_image +.. autofunction:: box_convert +.. autofunction:: box_area +.. autofunction:: box_iou +.. autofunction:: generalized_box_iou +.. autofunction:: roi_align +.. autofunction:: ps_roi_align +.. autofunction:: roi_pool +.. autofunction:: ps_roi_pool +.. autofunction:: deform_conv2d + +.. autoclass:: RoIAlign +.. autoclass:: PSRoIAlign +.. autoclass:: RoIPool +.. autoclass:: PSRoIPool +.. 
autoclass:: DeformConv2d +.. autoclass:: MultiScaleRoIAlign +.. autoclass:: FeaturePyramidNetwork diff --git a/docs/1.7.0/torchvision/_sources/transforms.rst.txt b/docs/1.7.0/torchvision/_sources/transforms.rst.txt new file mode 100644 index 000000000000..517ede35dbff --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/transforms.rst.txt @@ -0,0 +1,206 @@ +torchvision.transforms +====================== + +.. currentmodule:: torchvision.transforms + +Transforms are common image transformations. They can be chained together using :class:`Compose`. +Additionally, there is the :mod:`torchvision.transforms.functional` module. +Functional transforms give fine-grained control over the transformations. +This is useful if you have to build a more complex transformation pipeline +(e.g. in the case of segmentation tasks). + +All transformations accept PIL Image, Tensor Image or batch of Tensor Images as input. Tensor Image is a tensor with +``(C, H, W)`` shape, where ``C`` is a number of channels, ``H`` and ``W`` are image height and width. Batch of +Tensor Images is a tensor of ``(B, C, H, W)`` shape, where ``B`` is a number of images in the batch. Deterministic or +random transformations applied on the batch of Tensor Images identically transform all the images of the batch. + +.. warning:: + + Since v0.8.0 all random transformations are using torch default random generator to sample random parameters. + It is a backward compatibility breaking change and user should set the random state as following: + + .. code:: python + + # Previous versions + # import random + # random.seed(12) + + # Now + import torch + torch.manual_seed(17) + + Please, keep in mind that the same seed for torch random generator and Python random generator will not + produce the same results. + + +Scriptable transforms +--------------------- + +In order to script the transformations, please use ``torch.nn.Sequential`` instead of :class:`Compose`. + +.. code:: python + + transforms = torch.nn.Sequential( + transforms.CenterCrop(10), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ) + scripted_transforms = torch.jit.script(transforms) + +Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor`` and does not require +`lambda` functions or ``PIL.Image``. + +For any custom transformations to be used with ``torch.jit.script``, they should be derived from ``torch.nn.Module``. + + +Compositions of transforms +-------------------------- + +.. autoclass:: Compose + +Transforms on PIL Image and torch.\*Tensor +------------------------------------------ + +.. autoclass:: CenterCrop + :members: + +.. autoclass:: ColorJitter + :members: + +.. autoclass:: FiveCrop + :members: + +.. autoclass:: Grayscale + :members: + +.. autoclass:: Pad + :members: + +.. autoclass:: RandomAffine + :members: + +.. autoclass:: RandomApply + +.. autoclass:: RandomCrop + :members: + +.. autoclass:: RandomGrayscale + :members: + +.. autoclass:: RandomHorizontalFlip + :members: + +.. autoclass:: RandomPerspective + :members: + +.. autoclass:: RandomResizedCrop + :members: + +.. autoclass:: RandomRotation + :members: + +.. autoclass:: RandomSizedCrop + :members: + +.. autoclass:: RandomVerticalFlip + :members: + +.. autoclass:: Resize + :members: + +.. autoclass:: Scale + :members: + +.. autoclass:: TenCrop + :members: + +.. autoclass:: GaussianBlur + :members: + +Transforms on PIL Image only +---------------------------- + +.. autoclass:: RandomChoice + +.. 
autoclass:: RandomOrder + + +Transforms on torch.\*Tensor only +--------------------------------- + +.. autoclass:: LinearTransformation + :members: + +.. autoclass:: Normalize + :members: + +.. autoclass:: RandomErasing + :members: + +.. autoclass:: ConvertImageDtype + + +Conversion Transforms +--------------------- + +.. autoclass:: ToPILImage + :members: + +.. autoclass:: ToTensor + :members: + + +Generic Transforms +------------------ + +.. autoclass:: Lambda + :members: + + +Functional Transforms +--------------------- + +Functional transforms give you fine-grained control of the transformation pipeline. +As opposed to the transformations above, functional transforms don't contain a random number +generator for their parameters. +That means you have to specify/generate all parameters, but you can reuse the functional transform. + +Example: +you can apply a functional transform with the same parameters to multiple images like this: + +.. code:: python + + import torchvision.transforms.functional as TF + import random + + def my_segmentation_transforms(image, segmentation): + if random.random() > 0.5: + angle = random.randint(-30, 30) + image = TF.rotate(image, angle) + segmentation = TF.rotate(segmentation, angle) + # more transforms ... + return image, segmentation + + +Example: +you can use a functional transform to build transform classes with custom behavior: + +.. code:: python + + import torchvision.transforms.functional as TF + import random + + class MyRotationTransform: + """Rotate by one of the given angles.""" + + def __init__(self, angles): + self.angles = angles + + def __call__(self, x): + angle = random.choice(self.angles) + return TF.rotate(x, angle) + + rotation_transform = MyRotationTransform(angles=[-30, -15, 0, 15, 30]) + + +.. automodule:: torchvision.transforms.functional + :members: diff --git a/docs/1.7.0/torchvision/_sources/utils.rst.txt b/docs/1.7.0/torchvision/_sources/utils.rst.txt new file mode 100644 index 000000000000..ad2fc91c8974 --- /dev/null +++ b/docs/1.7.0/torchvision/_sources/utils.rst.txt @@ -0,0 +1,9 @@ +torchvision.utils +================= + +.. currentmodule:: torchvision.utils + +.. autofunction:: make_grid + +.. autofunction:: save_image + diff --git a/docs/1.7.0/torchvision/_static/ajax-loader.gif b/docs/1.7.0/torchvision/_static/ajax-loader.gif new file mode 100644 index 000000000000..61faf8cab239 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/ajax-loader.gif differ diff --git a/docs/1.7.0/torchvision/_static/basic.css b/docs/1.7.0/torchvision/_static/basic.css new file mode 100644 index 000000000000..19ced1057aeb --- /dev/null +++ b/docs/1.7.0/torchvision/_static/basic.css @@ -0,0 +1,665 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + 
-ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + 
list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/comment-bright.png b/docs/1.7.0/torchvision/_static/comment-bright.png new file mode 100644 index 
000000000000..15e27edb12ac Binary files /dev/null and b/docs/1.7.0/torchvision/_static/comment-bright.png differ diff --git a/docs/1.7.0/torchvision/_static/comment-close.png b/docs/1.7.0/torchvision/_static/comment-close.png new file mode 100644 index 000000000000..4d91bcf57de8 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/comment-close.png differ diff --git a/docs/1.7.0/torchvision/_static/comment.png b/docs/1.7.0/torchvision/_static/comment.png new file mode 100644 index 000000000000..dfbc0cbd512b Binary files /dev/null and b/docs/1.7.0/torchvision/_static/comment.png differ diff --git a/docs/1.7.0/torchvision/_static/css/pytorch_theme.css b/docs/1.7.0/torchvision/_static/css/pytorch_theme.css new file mode 100644 index 000000000000..0e54497643ce --- /dev/null +++ b/docs/1.7.0/torchvision/_static/css/pytorch_theme.css @@ -0,0 +1,118 @@ +body { + font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; +} + +/* Default header fonts are ugly */ +h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption { + font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; +} + +/* Use white for docs background */ +.wy-side-nav-search { + background-color: #fff; +} + +.wy-nav-content-wrap, .wy-menu li.current > a { + background-color: #fff; +} + +@media screen and (min-width: 1400px) { + .wy-nav-content-wrap { + background-color: rgba(0, 0, 0, 0.0470588); + } + + .wy-nav-content { + background-color: #fff; + } +} + +/* Fixes for mobile */ +.wy-nav-top { + background-color: #fff; + background-image: url('../img/pytorch-logo-dark.svg'); + background-repeat: no-repeat; + background-position: center; + padding: 0; + margin: 0.4045em 0.809em; + color: #333; +} + +.wy-nav-top > a { + display: none; +} + +@media screen and (max-width: 768px) { + .wy-side-nav-search>a img.logo { + height: 60px; + } +} + +/* This is needed to ensure that logo above search scales properly */ +.wy-side-nav-search a { + display: block; +} + +/* This ensures that multiple constructors will remain in separate lines. */ +.rst-content dl:not(.docutils) dt { + display: table; +} + +/* Use our red for literals (it's very similar to the original color) */ +.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { + color: #F05732; +} + +.rst-content tt.xref, a .rst-content tt, .rst-content tt.xref, +.rst-content code.xref, a .rst-content tt, a .rst-content code { + color: #404040; +} + +/* Change link colors (except for the menu) */ + +a { + color: #F05732; +} + +a:hover { + color: #F05732; +} + + +a:visited { + color: #D44D2C; +} + +.wy-menu a { + color: #b3b3b3; +} + +.wy-menu a:hover { + color: #b3b3b3; +} + +/* Default footer text is quite big */ +footer { + font-size: 80%; +} + +footer .rst-footer-buttons { + font-size: 125%; /* revert footer settings - 1/80% = 125% */ +} + +footer p { + font-size: 100%; +} + +/* For hidden headers that appear in TOC tree */ +/* see http://stackoverflow.com/a/32363545/3343043 */ +.rst-content .hidden-section { + display: none; +} + +nav .hidden-section { + display: inherit; +} + +.wy-side-nav-search>div.version { + color: #000; +} diff --git a/docs/1.7.0/torchvision/_static/css/theme.css b/docs/1.7.0/torchvision/_static/css/theme.css new file mode 100644 index 000000000000..59fe0cd56456 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/css/theme.css @@ -0,0 +1,12282 @@ +@charset "UTF-8"; +/*! 
+ * Bootstrap v4.0.0 (https://getbootstrap.com) + * Copyright 2011-2018 The Bootstrap Authors + * Copyright 2011-2018 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ +:root { + --blue: #007bff; + --indigo: #6610f2; + --purple: #6f42c1; + --pink: #e83e8c; + --red: #dc3545; + --orange: #fd7e14; + --yellow: #ffc107; + --green: #28a745; + --teal: #20c997; + --cyan: #17a2b8; + --white: #fff; + --gray: #6c757d; + --gray-dark: #343a40; + --primary: #007bff; + --secondary: #6c757d; + --success: #28a745; + --info: #17a2b8; + --warning: #ffc107; + --danger: #dc3545; + --light: #f8f9fa; + --dark: #343a40; + --breakpoint-xs: 0; + --breakpoint-sm: 576px; + --breakpoint-md: 768px; + --breakpoint-lg: 992px; + --breakpoint-xl: 1200px; + --font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + --font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +*, +*::before, +*::after { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +html { + font-family: sans-serif; + line-height: 1.15; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; + -ms-overflow-style: scrollbar; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); +} + +@-ms-viewport { + width: device-width; +} +article, aside, dialog, figcaption, figure, footer, header, hgroup, main, nav, section { + display: block; +} + +body { + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #212529; + text-align: left; + background-color: #fff; +} + +[tabindex="-1"]:focus { + outline: 0 !important; +} + +hr { + -webkit-box-sizing: content-box; + box-sizing: content-box; + height: 0; + overflow: visible; +} + +h1, h2, h3, h4, h5, h6 { + margin-top: 0; + margin-bottom: 0.5rem; +} + +p { + margin-top: 0; + margin-bottom: 1rem; +} + +abbr[title], +abbr[data-original-title] { + text-decoration: underline; + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; + cursor: help; + border-bottom: 0; +} + +address { + margin-bottom: 1rem; + font-style: normal; + line-height: inherit; +} + +ol, +ul, +dl { + margin-top: 0; + margin-bottom: 1rem; +} + +ol ol, +ul ul, +ol ul, +ul ol { + margin-bottom: 0; +} + +dt { + font-weight: 700; +} + +dd { + margin-bottom: .5rem; + margin-left: 0; +} + +blockquote { + margin: 0 0 1rem; +} + +dfn { + font-style: italic; +} + +b, +strong { + font-weight: bolder; +} + +small { + font-size: 80%; +} + +sub, +sup { + position: relative; + font-size: 75%; + line-height: 0; + vertical-align: baseline; +} + +sub { + bottom: -.25em; +} + +sup { + top: -.5em; +} + +a { + color: #007bff; + text-decoration: none; + background-color: transparent; + -webkit-text-decoration-skip: objects; +} +a:hover { + color: #0056b3; + text-decoration: underline; +} + +a:not([href]):not([tabindex]) { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):focus { + outline: 0; +} + +pre, +code, +kbd, +samp { + font-family: monospace, monospace; + font-size: 1em; +} + +pre { + margin-top: 0; + margin-bottom: 1rem; + overflow: auto; + -ms-overflow-style: scrollbar; 
+} + +figure { + margin: 0 0 1rem; +} + +img { + vertical-align: middle; + border-style: none; +} + +svg:not(:root) { + overflow: hidden; +} + +table { + border-collapse: collapse; +} + +caption { + padding-top: 0.75rem; + padding-bottom: 0.75rem; + color: #6c757d; + text-align: left; + caption-side: bottom; +} + +th { + text-align: inherit; +} + +label { + display: inline-block; + margin-bottom: .5rem; +} + +button { + border-radius: 0; +} + +button:focus { + outline: 1px dotted; + outline: 5px auto -webkit-focus-ring-color; +} + +input, +button, +select, +optgroup, +textarea { + margin: 0; + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +button, +input { + overflow: visible; +} + +button, +select { + text-transform: none; +} + +button, +html [type="button"], +[type="reset"], +[type="submit"] { + -webkit-appearance: button; +} + +button::-moz-focus-inner, +[type="button"]::-moz-focus-inner, +[type="reset"]::-moz-focus-inner, +[type="submit"]::-moz-focus-inner { + padding: 0; + border-style: none; +} + +input[type="radio"], +input[type="checkbox"] { + -webkit-box-sizing: border-box; + box-sizing: border-box; + padding: 0; +} + +input[type="date"], +input[type="time"], +input[type="datetime-local"], +input[type="month"] { + -webkit-appearance: listbox; +} + +textarea { + overflow: auto; + resize: vertical; +} + +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; +} + +legend { + display: block; + width: 100%; + max-width: 100%; + padding: 0; + margin-bottom: .5rem; + font-size: 1.5rem; + line-height: inherit; + color: inherit; + white-space: normal; +} + +progress { + vertical-align: baseline; +} + +[type="number"]::-webkit-inner-spin-button, +[type="number"]::-webkit-outer-spin-button { + height: auto; +} + +[type="search"] { + outline-offset: -2px; + -webkit-appearance: none; +} + +[type="search"]::-webkit-search-cancel-button, +[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} + +::-webkit-file-upload-button { + font: inherit; + -webkit-appearance: button; +} + +output { + display: inline-block; +} + +summary { + display: list-item; + cursor: pointer; +} + +template { + display: none; +} + +[hidden] { + display: none !important; +} + +h1, h2, h3, h4, h5, h6, +.h1, .h2, .h3, .h4, .h5, .h6 { + margin-bottom: 0.5rem; + font-family: inherit; + font-weight: 500; + line-height: 1.2; + color: inherit; +} + +h1, .h1 { + font-size: 2.5rem; +} + +h2, .h2 { + font-size: 2rem; +} + +h3, .h3 { + font-size: 1.75rem; +} + +h4, .h4 { + font-size: 1.5rem; +} + +h5, .h5 { + font-size: 1.25rem; +} + +h6, .h6 { + font-size: 1rem; +} + +.lead { + font-size: 1.25rem; + font-weight: 300; +} + +.display-1 { + font-size: 6rem; + font-weight: 300; + line-height: 1.2; +} + +.display-2 { + font-size: 5.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-3 { + font-size: 4.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-4 { + font-size: 3.5rem; + font-weight: 300; + line-height: 1.2; +} + +hr { + margin-top: 1rem; + margin-bottom: 1rem; + border: 0; + border-top: 1px solid rgba(0, 0, 0, 0.1); +} + +small, +.small { + font-size: 80%; + font-weight: 400; +} + +mark, +.mark { + padding: 0.2em; + background-color: #fcf8e3; +} + +.list-unstyled { + padding-left: 0; + list-style: none; +} + +.list-inline { + padding-left: 0; + list-style: none; +} + +.list-inline-item { + display: inline-block; +} +.list-inline-item:not(:last-child) { + margin-right: 0.5rem; +} + +.initialism { + font-size: 90%; + text-transform: uppercase; +} + 
+.blockquote { + margin-bottom: 1rem; + font-size: 1.25rem; +} + +.blockquote-footer { + display: block; + font-size: 80%; + color: #6c757d; +} +.blockquote-footer::before { + content: "\2014 \00A0"; +} + +.img-fluid { + max-width: 100%; + height: auto; +} + +.img-thumbnail { + padding: 0.25rem; + background-color: #fff; + border: 1px solid #dee2e6; + border-radius: 0.25rem; + max-width: 100%; + height: auto; +} + +.figure { + display: inline-block; +} + +.figure-img { + margin-bottom: 0.5rem; + line-height: 1; +} + +.figure-caption { + font-size: 90%; + color: #6c757d; +} + +code, +kbd, +pre, +samp { + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +code { + font-size: 87.5%; + color: #e83e8c; + word-break: break-word; +} +a > code { + color: inherit; +} + +kbd { + padding: 0.2rem 0.4rem; + font-size: 87.5%; + color: #fff; + background-color: #212529; + border-radius: 0.2rem; +} +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: 700; +} + +pre { + display: block; + font-size: 87.5%; + color: #212529; +} +pre code { + font-size: inherit; + color: inherit; + word-break: normal; +} + +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} + +.container { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} +@media (min-width: 576px) { + .container { + max-width: 540px; + } +} +@media (min-width: 768px) { + .container { + max-width: 720px; + } +} +@media (min-width: 992px) { + .container { + max-width: 960px; + } +} +@media (min-width: 1200px) { + .container { + max-width: 1140px; + } +} + +.container-fluid { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} + +.row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -15px; + margin-left: -15px; +} + +.no-gutters { + margin-right: 0; + margin-left: 0; +} +.no-gutters > .col, +.no-gutters > [class*="col-"] { + padding-right: 0; + padding-left: 0; +} + +.col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col, +.col-auto, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm, +.col-sm-auto, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md, +.col-md-auto, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg, +.col-lg-auto, .col-xl-1, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl, +.col-xl-auto { + position: relative; + width: 100%; + min-height: 1px; + padding-right: 15px; + padding-left: 15px; +} + +.col { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; +} + +.col-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; +} + +.col-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; +} + +.col-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; +} + +.col-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; +} + 
+.col-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; +} + +.col-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; +} + +.col-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; +} + +.col-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; +} + +.col-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; +} + +.col-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; +} + +.col-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; +} + +.col-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; +} + +.col-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; +} + +.order-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; +} + +.order-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; +} + +.order-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; +} + +.order-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; +} + +.order-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; +} + +.order-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; +} + +.order-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; +} + +.order-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; +} + +.order-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; +} + +.order-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; +} + +.order-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; +} + +.order-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; +} + +.order-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; +} + +.order-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; +} + +.order-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; +} + +.offset-1 { + margin-left: 8.3333333333%; +} + +.offset-2 { + margin-left: 16.6666666667%; +} + +.offset-3 { + margin-left: 25%; +} + +.offset-4 { + margin-left: 33.3333333333%; +} + +.offset-5 { + margin-left: 41.6666666667%; +} + +.offset-6 { + margin-left: 50%; +} + +.offset-7 { + margin-left: 58.3333333333%; +} + +.offset-8 { + margin-left: 66.6666666667%; +} + +.offset-9 { + margin-left: 75%; +} + +.offset-10 { + margin-left: 83.3333333333%; +} + +.offset-11 { + margin-left: 91.6666666667%; +} + +@media (min-width: 576px) { + .col-sm { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-sm-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-sm-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-sm-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-sm-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-sm-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 
33.3333333333%; + max-width: 33.3333333333%; + } + + .col-sm-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-sm-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-sm-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-sm-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-sm-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-sm-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-sm-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-sm-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-sm-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-sm-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-sm-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-sm-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-sm-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-sm-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-sm-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-sm-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-sm-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-sm-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-sm-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-sm-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-sm-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-sm-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-sm-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-sm-0 { + margin-left: 0; + } + + .offset-sm-1 { + margin-left: 8.3333333333%; + } + + .offset-sm-2 { + margin-left: 16.6666666667%; + } + + .offset-sm-3 { + margin-left: 25%; + } + + .offset-sm-4 { + margin-left: 33.3333333333%; + } + + .offset-sm-5 { + margin-left: 41.6666666667%; + } + + .offset-sm-6 { + margin-left: 50%; + } + + .offset-sm-7 { + margin-left: 58.3333333333%; + } + + .offset-sm-8 { + margin-left: 66.6666666667%; + } + + .offset-sm-9 { + margin-left: 75%; + } + + .offset-sm-10 { + margin-left: 83.3333333333%; + } + + .offset-sm-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 768px) { + .col-md { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-md-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-md-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-md-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-md-3 { + -webkit-box-flex: 0; + -ms-flex: 
0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-md-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-md-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-md-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-md-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-md-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-md-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-md-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-md-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-md-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-md-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-md-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-md-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-md-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-md-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-md-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-md-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-md-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-md-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-md-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-md-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-md-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-md-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-md-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-md-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-md-0 { + margin-left: 0; + } + + .offset-md-1 { + margin-left: 8.3333333333%; + } + + .offset-md-2 { + margin-left: 16.6666666667%; + } + + .offset-md-3 { + margin-left: 25%; + } + + .offset-md-4 { + margin-left: 33.3333333333%; + } + + .offset-md-5 { + margin-left: 41.6666666667%; + } + + .offset-md-6 { + margin-left: 50%; + } + + .offset-md-7 { + margin-left: 58.3333333333%; + } + + .offset-md-8 { + margin-left: 66.6666666667%; + } + + .offset-md-9 { + margin-left: 75%; + } + + .offset-md-10 { + margin-left: 83.3333333333%; + } + + .offset-md-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 992px) { + .col-lg { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-lg-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-lg-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-lg-2 { + -webkit-box-flex: 0; + -ms-flex: 0 
0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-lg-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-lg-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-lg-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-lg-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-lg-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-lg-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-lg-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-lg-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-lg-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-lg-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-lg-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-lg-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-lg-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-lg-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-lg-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-lg-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-lg-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-lg-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-lg-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-lg-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-lg-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-lg-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-lg-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-lg-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-lg-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-lg-0 { + margin-left: 0; + } + + .offset-lg-1 { + margin-left: 8.3333333333%; + } + + .offset-lg-2 { + margin-left: 16.6666666667%; + } + + .offset-lg-3 { + margin-left: 25%; + } + + .offset-lg-4 { + margin-left: 33.3333333333%; + } + + .offset-lg-5 { + margin-left: 41.6666666667%; + } + + .offset-lg-6 { + margin-left: 50%; + } + + .offset-lg-7 { + margin-left: 58.3333333333%; + } + + .offset-lg-8 { + margin-left: 66.6666666667%; + } + + .offset-lg-9 { + margin-left: 75%; + } + + .offset-lg-10 { + margin-left: 83.3333333333%; + } + + .offset-lg-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 1200px) { + .col-xl { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-xl-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-xl-1 { + -webkit-box-flex: 0; + -ms-flex: 
0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-xl-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-xl-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-xl-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-xl-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-xl-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-xl-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-xl-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-xl-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-xl-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-xl-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-xl-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-xl-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-xl-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-xl-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-xl-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-xl-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-xl-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-xl-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-xl-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-xl-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-xl-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-xl-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-xl-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-xl-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-xl-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-xl-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-xl-0 { + margin-left: 0; + } + + .offset-xl-1 { + margin-left: 8.3333333333%; + } + + .offset-xl-2 { + margin-left: 16.6666666667%; + } + + .offset-xl-3 { + margin-left: 25%; + } + + .offset-xl-4 { + margin-left: 33.3333333333%; + } + + .offset-xl-5 { + margin-left: 41.6666666667%; + } + + .offset-xl-6 { + margin-left: 50%; + } + + .offset-xl-7 { + margin-left: 58.3333333333%; + } + + .offset-xl-8 { + margin-left: 66.6666666667%; + } + + .offset-xl-9 { + margin-left: 75%; + } + + .offset-xl-10 { + margin-left: 83.3333333333%; + } + + .offset-xl-11 { + margin-left: 91.6666666667%; + } +} +.table { + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + background-color: transparent; +} +.table th, +.table td { + padding: 0.75rem; + vertical-align: top; + border-top: 1px solid #dee2e6; +} +.table 
thead th { + vertical-align: bottom; + border-bottom: 2px solid #dee2e6; +} +.table tbody + tbody { + border-top: 2px solid #dee2e6; +} +.table .table { + background-color: #fff; +} + +.table-sm th, +.table-sm td { + padding: 0.3rem; +} + +.table-bordered { + border: 1px solid #dee2e6; +} +.table-bordered th, +.table-bordered td { + border: 1px solid #dee2e6; +} +.table-bordered thead th, +.table-bordered thead td { + border-bottom-width: 2px; +} + +.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(0, 0, 0, 0.05); +} + +.table-hover tbody tr:hover { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-primary, +.table-primary > th, +.table-primary > td { + background-color: #b8daff; +} + +.table-hover .table-primary:hover { + background-color: #9fcdff; +} +.table-hover .table-primary:hover > td, +.table-hover .table-primary:hover > th { + background-color: #9fcdff; +} + +.table-secondary, +.table-secondary > th, +.table-secondary > td { + background-color: #d6d8db; +} + +.table-hover .table-secondary:hover { + background-color: #c8cbcf; +} +.table-hover .table-secondary:hover > td, +.table-hover .table-secondary:hover > th { + background-color: #c8cbcf; +} + +.table-success, +.table-success > th, +.table-success > td { + background-color: #c3e6cb; +} + +.table-hover .table-success:hover { + background-color: #b1dfbb; +} +.table-hover .table-success:hover > td, +.table-hover .table-success:hover > th { + background-color: #b1dfbb; +} + +.table-info, +.table-info > th, +.table-info > td { + background-color: #bee5eb; +} + +.table-hover .table-info:hover { + background-color: #abdde5; +} +.table-hover .table-info:hover > td, +.table-hover .table-info:hover > th { + background-color: #abdde5; +} + +.table-warning, +.table-warning > th, +.table-warning > td { + background-color: #ffeeba; +} + +.table-hover .table-warning:hover { + background-color: #ffe8a1; +} +.table-hover .table-warning:hover > td, +.table-hover .table-warning:hover > th { + background-color: #ffe8a1; +} + +.table-danger, +.table-danger > th, +.table-danger > td { + background-color: #f5c6cb; +} + +.table-hover .table-danger:hover { + background-color: #f1b0b7; +} +.table-hover .table-danger:hover > td, +.table-hover .table-danger:hover > th { + background-color: #f1b0b7; +} + +.table-light, +.table-light > th, +.table-light > td { + background-color: #fdfdfe; +} + +.table-hover .table-light:hover { + background-color: #ececf6; +} +.table-hover .table-light:hover > td, +.table-hover .table-light:hover > th { + background-color: #ececf6; +} + +.table-dark, +.table-dark > th, +.table-dark > td { + background-color: #c6c8ca; +} + +.table-hover .table-dark:hover { + background-color: #b9bbbe; +} +.table-hover .table-dark:hover > td, +.table-hover .table-dark:hover > th { + background-color: #b9bbbe; +} + +.table-active, +.table-active > th, +.table-active > td { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-hover .table-active:hover { + background-color: rgba(0, 0, 0, 0.075); +} +.table-hover .table-active:hover > td, +.table-hover .table-active:hover > th { + background-color: rgba(0, 0, 0, 0.075); +} + +.table .thead-dark th { + color: #fff; + background-color: #212529; + border-color: #32383e; +} +.table .thead-light th { + color: #495057; + background-color: #e9ecef; + border-color: #dee2e6; +} + +.table-dark { + color: #fff; + background-color: #212529; +} +.table-dark th, +.table-dark td, +.table-dark thead th { + border-color: #32383e; +} +.table-dark.table-bordered { + border: 0; +} 
+.table-dark.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(255, 255, 255, 0.05); +} +.table-dark.table-hover tbody tr:hover { + background-color: rgba(255, 255, 255, 0.075); +} + +@media (max-width: 575.98px) { + .table-responsive-sm { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-sm > .table-bordered { + border: 0; + } +} +@media (max-width: 767.98px) { + .table-responsive-md { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-md > .table-bordered { + border: 0; + } +} +@media (max-width: 991.98px) { + .table-responsive-lg { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-lg > .table-bordered { + border: 0; + } +} +@media (max-width: 1199.98px) { + .table-responsive-xl { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-xl > .table-bordered { + border: 0; + } +} +.table-responsive { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; +} +.table-responsive > .table-bordered { + border: 0; +} + +.form-control { + display: block; + width: 100%; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + background-clip: padding-box; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-transition: border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.form-control::-ms-expand { + background-color: transparent; + border: 0; +} +.form-control:focus { + color: #495057; + background-color: #fff; + border-color: #80bdff; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.form-control::-webkit-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:disabled, .form-control[readonly] { + background-color: #e9ecef; + opacity: 1; +} + +select.form-control:not([size]):not([multiple]) { + height: calc(2.25rem + 2px); +} +select.form-control:focus::-ms-value { + color: #495057; + background-color: #fff; +} + +.form-control-file, +.form-control-range { + display: block; + width: 100%; +} + +.col-form-label { + padding-top: calc(0.375rem + 1px); + padding-bottom: calc(0.375rem + 1px); + margin-bottom: 0; + font-size: inherit; + line-height: 1.5; +} + +.col-form-label-lg { + padding-top: calc(0.5rem + 1px); + padding-bottom: calc(0.5rem + 1px); + font-size: 1.25rem; + line-height: 1.5; +} + +.col-form-label-sm { + padding-top: calc(0.25rem + 1px); + padding-bottom: calc(0.25rem + 1px); + font-size: 0.875rem; + line-height: 1.5; +} + +.form-control-plaintext { + display: block; + width: 100%; + 
padding-top: 0.375rem; + padding-bottom: 0.375rem; + margin-bottom: 0; + line-height: 1.5; + background-color: transparent; + border: solid transparent; + border-width: 1px 0; +} +.form-control-plaintext.form-control-sm, .input-group-sm > .form-control-plaintext.form-control, +.input-group-sm > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-prepend > .form-control-plaintext.btn, +.input-group-sm > .input-group-append > .form-control-plaintext.btn, .form-control-plaintext.form-control-lg, .input-group-lg > .form-control-plaintext.form-control, +.input-group-lg > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-prepend > .form-control-plaintext.btn, +.input-group-lg > .input-group-append > .form-control-plaintext.btn { + padding-right: 0; + padding-left: 0; +} + +.form-control-sm, .input-group-sm > .form-control, +.input-group-sm > .input-group-prepend > .input-group-text, +.input-group-sm > .input-group-append > .input-group-text, +.input-group-sm > .input-group-prepend > .btn, +.input-group-sm > .input-group-append > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +select.form-control-sm:not([size]):not([multiple]), .input-group-sm > select.form-control:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(1.8125rem + 2px); +} + +.form-control-lg, .input-group-lg > .form-control, +.input-group-lg > .input-group-prepend > .input-group-text, +.input-group-lg > .input-group-append > .input-group-text, +.input-group-lg > .input-group-prepend > .btn, +.input-group-lg > .input-group-append > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +select.form-control-lg:not([size]):not([multiple]), .input-group-lg > select.form-control:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(2.875rem + 2px); +} + +.form-group { + margin-bottom: 1rem; +} + +.form-text { + display: block; + margin-top: 0.25rem; +} + +.form-row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -5px; + margin-left: -5px; +} +.form-row > .col, +.form-row > [class*="col-"] { + padding-right: 5px; + padding-left: 5px; +} + +.form-check { + position: relative; + display: block; + padding-left: 1.25rem; +} + +.form-check-input { + position: absolute; + margin-top: 0.3rem; + margin-left: -1.25rem; +} +.form-check-input:disabled ~ .form-check-label { + color: #6c757d; +} + +.form-check-label { + margin-bottom: 0; +} + +.form-check-inline { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: 
inline-flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 0; + margin-right: 0.75rem; +} +.form-check-inline .form-check-input { + position: static; + margin-top: 0; + margin-right: 0.3125rem; + margin-left: 0; +} + +.valid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #28a745; +} + +.valid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(40, 167, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:valid, .form-control.is-valid, +.was-validated .custom-select:valid, +.custom-select.is-valid { + border-color: #28a745; +} +.was-validated .form-control:valid:focus, .form-control.is-valid:focus, +.was-validated .custom-select:valid:focus, +.custom-select.is-valid:focus { + border-color: #28a745; + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} +.was-validated .form-control:valid ~ .valid-feedback, +.was-validated .form-control:valid ~ .valid-tooltip, .form-control.is-valid ~ .valid-feedback, +.form-control.is-valid ~ .valid-tooltip, +.was-validated .custom-select:valid ~ .valid-feedback, +.was-validated .custom-select:valid ~ .valid-tooltip, +.custom-select.is-valid ~ .valid-feedback, +.custom-select.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label { + color: #28a745; +} +.was-validated .form-check-input:valid ~ .valid-feedback, +.was-validated .form-check-input:valid ~ .valid-tooltip, .form-check-input.is-valid ~ .valid-feedback, +.form-check-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-control-input:valid ~ .custom-control-label, .custom-control-input.is-valid ~ .custom-control-label { + color: #28a745; +} +.was-validated .custom-control-input:valid ~ .custom-control-label::before, .custom-control-input.is-valid ~ .custom-control-label::before { + background-color: #71dd8a; +} +.was-validated .custom-control-input:valid ~ .valid-feedback, +.was-validated .custom-control-input:valid ~ .valid-tooltip, .custom-control-input.is-valid ~ .valid-feedback, +.custom-control-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before, .custom-control-input.is-valid:checked ~ .custom-control-label::before { + background-color: #34ce57; +} +.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before, .custom-control-input.is-valid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .custom-file-input:valid ~ .custom-file-label, .custom-file-input.is-valid ~ .custom-file-label { + border-color: #28a745; +} +.was-validated .custom-file-input:valid ~ .custom-file-label::before, .custom-file-input.is-valid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:valid ~ .valid-feedback, +.was-validated .custom-file-input:valid ~ .valid-tooltip, .custom-file-input.is-valid ~ .valid-feedback, +.custom-file-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-file-input:valid:focus ~ .custom-file-label, 
.custom-file-input.is-valid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.invalid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #dc3545; +} + +.invalid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(220, 53, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:invalid, .form-control.is-invalid, +.was-validated .custom-select:invalid, +.custom-select.is-invalid { + border-color: #dc3545; +} +.was-validated .form-control:invalid:focus, .form-control.is-invalid:focus, +.was-validated .custom-select:invalid:focus, +.custom-select.is-invalid:focus { + border-color: #dc3545; + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} +.was-validated .form-control:invalid ~ .invalid-feedback, +.was-validated .form-control:invalid ~ .invalid-tooltip, .form-control.is-invalid ~ .invalid-feedback, +.form-control.is-invalid ~ .invalid-tooltip, +.was-validated .custom-select:invalid ~ .invalid-feedback, +.was-validated .custom-select:invalid ~ .invalid-tooltip, +.custom-select.is-invalid ~ .invalid-feedback, +.custom-select.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label { + color: #dc3545; +} +.was-validated .form-check-input:invalid ~ .invalid-feedback, +.was-validated .form-check-input:invalid ~ .invalid-tooltip, .form-check-input.is-invalid ~ .invalid-feedback, +.form-check-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-control-input:invalid ~ .custom-control-label, .custom-control-input.is-invalid ~ .custom-control-label { + color: #dc3545; +} +.was-validated .custom-control-input:invalid ~ .custom-control-label::before, .custom-control-input.is-invalid ~ .custom-control-label::before { + background-color: #efa2a9; +} +.was-validated .custom-control-input:invalid ~ .invalid-feedback, +.was-validated .custom-control-input:invalid ~ .invalid-tooltip, .custom-control-input.is-invalid ~ .invalid-feedback, +.custom-control-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before, .custom-control-input.is-invalid:checked ~ .custom-control-label::before { + background-color: #e4606d; +} +.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before, .custom-control-input.is-invalid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .custom-file-input:invalid ~ .custom-file-label, .custom-file-input.is-invalid ~ .custom-file-label { + border-color: #dc3545; +} +.was-validated .custom-file-input:invalid ~ .custom-file-label::before, .custom-file-input.is-invalid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:invalid ~ .invalid-feedback, +.was-validated .custom-file-input:invalid ~ .invalid-tooltip, .custom-file-input.is-invalid ~ .invalid-feedback, +.custom-file-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-file-input:invalid:focus 
~ .custom-file-label, .custom-file-input.is-invalid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.form-inline { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.form-inline .form-check { + width: 100%; +} +@media (min-width: 576px) { + .form-inline label { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + margin-bottom: 0; + } + .form-inline .form-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin-bottom: 0; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-plaintext { + display: inline-block; + } + .form-inline .input-group { + width: auto; + } + .form-inline .form-check { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: auto; + padding-left: 0; + } + .form-inline .form-check-input { + position: relative; + margin-top: 0; + margin-right: 0.25rem; + margin-left: 0; + } + .form-inline .custom-control { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + } + .form-inline .custom-control-label { + margin-bottom: 0; + } +} + +.btn { + display: inline-block; + font-weight: 400; + text-align: center; + white-space: nowrap; + vertical-align: middle; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid transparent; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + border-radius: 0.25rem; + -webkit-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.btn:hover, .btn:focus { + text-decoration: none; +} +.btn:focus, .btn.focus { + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.btn.disabled, .btn:disabled { + opacity: 0.65; +} +.btn:not(:disabled):not(.disabled) { + cursor: pointer; +} +.btn:not(:disabled):not(.disabled):active, .btn:not(:disabled):not(.disabled).active { + background-image: none; +} + +a.btn.disabled, +fieldset:disabled a.btn { + 
pointer-events: none; +} + +.btn-primary { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:hover { + color: #fff; + background-color: #0069d9; + border-color: #0062cc; +} +.btn-primary:focus, .btn-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} +.btn-primary.disabled, .btn-primary:disabled { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, .show > .btn-primary.dropdown-toggle { + color: #fff; + background-color: #0062cc; + border-color: #005cbf; +} +.btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-secondary { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:hover { + color: #fff; + background-color: #5a6268; + border-color: #545b62; +} +.btn-secondary:focus, .btn-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-secondary.disabled, .btn-secondary:disabled { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, .show > .btn-secondary.dropdown-toggle { + color: #fff; + background-color: #545b62; + border-color: #4e555b; +} +.btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-success { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:hover { + color: #fff; + background-color: #218838; + border-color: #1e7e34; +} +.btn-success:focus, .btn-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-success.disabled, .btn-success:disabled { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:not(:disabled):not(.disabled):active, .btn-success:not(:disabled):not(.disabled).active, .show > .btn-success.dropdown-toggle { + color: #fff; + background-color: #1e7e34; + border-color: #1c7430; +} +.btn-success:not(:disabled):not(.disabled):active:focus, .btn-success:not(:disabled):not(.disabled).active:focus, .show > .btn-success.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-info { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-info:hover { + color: #fff; + background-color: #138496; + border-color: #117a8b; +} +.btn-info:focus, .btn-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-info.disabled, .btn-info:disabled { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-info:not(:disabled):not(.disabled):active, .btn-info:not(:disabled):not(.disabled).active, .show > .btn-info.dropdown-toggle { + color: #fff; + background-color: #117a8b; + border-color: #10707f; +} 
+.btn-info:not(:disabled):not(.disabled):active:focus, .btn-info:not(:disabled):not(.disabled).active:focus, .show > .btn-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-warning { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:hover { + color: #212529; + background-color: #e0a800; + border-color: #d39e00; +} +.btn-warning:focus, .btn-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-warning.disabled, .btn-warning:disabled { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:not(:disabled):not(.disabled):active, .btn-warning:not(:disabled):not(.disabled).active, .show > .btn-warning.dropdown-toggle { + color: #212529; + background-color: #d39e00; + border-color: #c69500; +} +.btn-warning:not(:disabled):not(.disabled):active:focus, .btn-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-danger { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:hover { + color: #fff; + background-color: #c82333; + border-color: #bd2130; +} +.btn-danger:focus, .btn-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-danger.disabled, .btn-danger:disabled { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:not(:disabled):not(.disabled):active, .btn-danger:not(:disabled):not(.disabled).active, .show > .btn-danger.dropdown-toggle { + color: #fff; + background-color: #bd2130; + border-color: #b21f2d; +} +.btn-danger:not(:disabled):not(.disabled):active:focus, .btn-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-light { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:hover { + color: #212529; + background-color: #e2e6ea; + border-color: #dae0e5; +} +.btn-light:focus, .btn-light.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-light.disabled, .btn-light:disabled { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:not(:disabled):not(.disabled):active, .btn-light:not(:disabled):not(.disabled).active, .show > .btn-light.dropdown-toggle { + color: #212529; + background-color: #dae0e5; + border-color: #d3d9df; +} +.btn-light:not(:disabled):not(.disabled):active:focus, .btn-light:not(:disabled):not(.disabled).active:focus, .show > .btn-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-dark { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-dark:hover { + color: #fff; + background-color: #23272b; + border-color: #1d2124; +} +.btn-dark:focus, .btn-dark.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-dark.disabled, .btn-dark:disabled { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} 
+.btn-dark:not(:disabled):not(.disabled):active, .btn-dark:not(:disabled):not(.disabled).active, .show > .btn-dark.dropdown-toggle { + color: #fff; + background-color: #1d2124; + border-color: #171a1d; +} +.btn-dark:not(:disabled):not(.disabled):active:focus, .btn-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-outline-primary { + color: #007bff; + background-color: transparent; + background-image: none; + border-color: #007bff; +} +.btn-outline-primary:hover { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:focus, .btn-outline-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} +.btn-outline-primary.disabled, .btn-outline-primary:disabled { + color: #007bff; + background-color: transparent; +} +.btn-outline-primary:not(:disabled):not(.disabled):active, .btn-outline-primary:not(:disabled):not(.disabled).active, .show > .btn-outline-primary.dropdown-toggle { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:not(:disabled):not(.disabled):active:focus, .btn-outline-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-outline-secondary { + color: #6c757d; + background-color: transparent; + background-image: none; + border-color: #6c757d; +} +.btn-outline-secondary:hover { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:focus, .btn-outline-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-outline-secondary.disabled, .btn-outline-secondary:disabled { + color: #6c757d; + background-color: transparent; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active, .btn-outline-secondary:not(:disabled):not(.disabled).active, .show > .btn-outline-secondary.dropdown-toggle { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active:focus, .btn-outline-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-outline-success { + color: #28a745; + background-color: transparent; + background-image: none; + border-color: #28a745; +} +.btn-outline-success:hover { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-outline-success:focus, .btn-outline-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-outline-success.disabled, .btn-outline-success:disabled { + color: #28a745; + background-color: transparent; +} +.btn-outline-success:not(:disabled):not(.disabled):active, .btn-outline-success:not(:disabled):not(.disabled).active, .show > .btn-outline-success.dropdown-toggle { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-outline-success:not(:disabled):not(.disabled):active:focus, .btn-outline-success:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-success.dropdown-toggle:focus 
{ + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-outline-info { + color: #17a2b8; + background-color: transparent; + background-image: none; + border-color: #17a2b8; +} +.btn-outline-info:hover { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:focus, .btn-outline-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-outline-info.disabled, .btn-outline-info:disabled { + color: #17a2b8; + background-color: transparent; +} +.btn-outline-info:not(:disabled):not(.disabled):active, .btn-outline-info:not(:disabled):not(.disabled).active, .show > .btn-outline-info.dropdown-toggle { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:not(:disabled):not(.disabled):active:focus, .btn-outline-info:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-outline-warning { + color: #ffc107; + background-color: transparent; + background-image: none; + border-color: #ffc107; +} +.btn-outline-warning:hover { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:focus, .btn-outline-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-outline-warning.disabled, .btn-outline-warning:disabled { + color: #ffc107; + background-color: transparent; +} +.btn-outline-warning:not(:disabled):not(.disabled):active, .btn-outline-warning:not(:disabled):not(.disabled).active, .show > .btn-outline-warning.dropdown-toggle { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:not(:disabled):not(.disabled):active:focus, .btn-outline-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-outline-danger { + color: #dc3545; + background-color: transparent; + background-image: none; + border-color: #dc3545; +} +.btn-outline-danger:hover { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:focus, .btn-outline-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-outline-danger.disabled, .btn-outline-danger:disabled { + color: #dc3545; + background-color: transparent; +} +.btn-outline-danger:not(:disabled):not(.disabled):active, .btn-outline-danger:not(:disabled):not(.disabled).active, .show > .btn-outline-danger.dropdown-toggle { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:not(:disabled):not(.disabled):active:focus, .btn-outline-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-outline-light { + color: #f8f9fa; + background-color: transparent; + background-image: none; + border-color: #f8f9fa; +} +.btn-outline-light:hover { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:focus, .btn-outline-light.focus { + 
-webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-outline-light.disabled, .btn-outline-light:disabled { + color: #f8f9fa; + background-color: transparent; +} +.btn-outline-light:not(:disabled):not(.disabled):active, .btn-outline-light:not(:disabled):not(.disabled).active, .show > .btn-outline-light.dropdown-toggle { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:not(:disabled):not(.disabled):active:focus, .btn-outline-light:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-outline-dark { + color: #343a40; + background-color: transparent; + background-image: none; + border-color: #343a40; +} +.btn-outline-dark:hover { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:focus, .btn-outline-dark.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-outline-dark.disabled, .btn-outline-dark:disabled { + color: #343a40; + background-color: transparent; +} +.btn-outline-dark:not(:disabled):not(.disabled):active, .btn-outline-dark:not(:disabled):not(.disabled).active, .show > .btn-outline-dark.dropdown-toggle { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:not(:disabled):not(.disabled):active:focus, .btn-outline-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-link { + font-weight: 400; + color: #007bff; + background-color: transparent; +} +.btn-link:hover { + color: #0056b3; + text-decoration: underline; + background-color: transparent; + border-color: transparent; +} +.btn-link:focus, .btn-link.focus { + text-decoration: underline; + border-color: transparent; + -webkit-box-shadow: none; + box-shadow: none; +} +.btn-link:disabled, .btn-link.disabled { + color: #6c757d; +} + +.btn-lg, .btn-group-lg > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +.btn-sm, .btn-group-sm > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.btn-block { + display: block; + width: 100%; +} +.btn-block + .btn-block { + margin-top: 0.5rem; +} + +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} + +.fade { + opacity: 0; + -webkit-transition: opacity 0.15s linear; + transition: opacity 0.15s linear; +} +.fade.show { + opacity: 1; +} + +.collapse { + display: none; +} +.collapse.show { + display: block; +} + +tr.collapse.show { + display: table-row; +} + +tbody.collapse.show { + display: table-row-group; +} + +.collapsing { + position: relative; + height: 0; + overflow: hidden; + -webkit-transition: height 0.35s ease; + transition: height 0.35s ease; +} + +.dropup, +.dropdown { + position: relative; +} + +.dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid; + border-right: 0.3em solid transparent; + border-bottom: 0; + border-left: 0.3em solid transparent; +} +.dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropdown-menu 
{ + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + margin: 0.125rem 0 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.dropup .dropdown-menu { + margin-top: 0; + margin-bottom: 0.125rem; +} +.dropup .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0; + border-right: 0.3em solid transparent; + border-bottom: 0.3em solid; + border-left: 0.3em solid transparent; +} +.dropup .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropright .dropdown-menu { + margin-top: 0; + margin-left: 0.125rem; +} +.dropright .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-bottom: 0.3em solid transparent; + border-left: 0.3em solid; +} +.dropright .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropright .dropdown-toggle::after { + vertical-align: 0; +} + +.dropleft .dropdown-menu { + margin-top: 0; + margin-right: 0.125rem; +} +.dropleft .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; +} +.dropleft .dropdown-toggle::after { + display: none; +} +.dropleft .dropdown-toggle::before { + display: inline-block; + width: 0; + height: 0; + margin-right: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0.3em solid; + border-bottom: 0.3em solid transparent; +} +.dropleft .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropleft .dropdown-toggle::before { + vertical-align: 0; +} + +.dropdown-divider { + height: 0; + margin: 0.5rem 0; + overflow: hidden; + border-top: 1px solid #e9ecef; +} + +.dropdown-item { + display: block; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #212529; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; +} +.dropdown-item:hover, .dropdown-item:focus { + color: #16181b; + text-decoration: none; + background-color: #f8f9fa; +} +.dropdown-item.active, .dropdown-item:active { + color: #fff; + text-decoration: none; + background-color: #007bff; +} +.dropdown-item.disabled, .dropdown-item:disabled { + color: #6c757d; + background-color: transparent; +} + +.dropdown-menu.show { + display: block; +} + +.dropdown-header { + display: block; + padding: 0.5rem 1.5rem; + margin-bottom: 0; + font-size: 0.875rem; + color: #6c757d; + white-space: nowrap; +} + +.btn-group, +.btn-group-vertical { + position: relative; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + vertical-align: middle; +} +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; +} +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover { + z-index: 1; +} +.btn-group > .btn:focus, .btn-group > .btn:active, .btn-group > .btn.active, +.btn-group-vertical > .btn:focus, +.btn-group-vertical > .btn:active, +.btn-group-vertical > .btn.active { + z-index: 1; +} +.btn-group .btn + .btn, +.btn-group .btn + .btn-group, +.btn-group .btn-group + .btn, +.btn-group .btn-group + .btn-group, 
+.btn-group-vertical .btn + .btn, +.btn-group-vertical .btn + .btn-group, +.btn-group-vertical .btn-group + .btn, +.btn-group-vertical .btn-group + .btn-group { + margin-left: -1px; +} + +.btn-toolbar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.btn-toolbar .input-group { + width: auto; +} + +.btn-group > .btn:first-child { + margin-left: 0; +} +.btn-group > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group > .btn-group:not(:last-child) > .btn { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.btn-group > .btn:not(:first-child), +.btn-group > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.dropdown-toggle-split { + padding-right: 0.5625rem; + padding-left: 0.5625rem; +} +.dropdown-toggle-split::after { + margin-left: 0; +} + +.btn-sm + .dropdown-toggle-split, .btn-group-sm > .btn + .dropdown-toggle-split { + padding-right: 0.375rem; + padding-left: 0.375rem; +} + +.btn-lg + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split { + padding-right: 0.75rem; + padding-left: 0.75rem; +} + +.btn-group-vertical { + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; +} +.btn-group-vertical .btn, +.btn-group-vertical .btn-group { + width: 100%; +} +.btn-group-vertical > .btn + .btn, +.btn-group-vertical > .btn + .btn-group, +.btn-group-vertical > .btn-group + .btn, +.btn-group-vertical > .btn-group + .btn-group { + margin-top: -1px; + margin-left: 0; +} +.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group-vertical > .btn-group:not(:last-child) > .btn { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn:not(:first-child), +.btn-group-vertical > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.btn-group-toggle > .btn, +.btn-group-toggle > .btn-group > .btn { + margin-bottom: 0; +} +.btn-group-toggle > .btn input[type="radio"], +.btn-group-toggle > .btn input[type="checkbox"], +.btn-group-toggle > .btn-group > .btn input[type="radio"], +.btn-group-toggle > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} + +.input-group { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: stretch; + -ms-flex-align: stretch; + align-items: stretch; + width: 100%; +} +.input-group > .form-control, +.input-group > .custom-select, +.input-group > .custom-file { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + width: 1%; + margin-bottom: 0; +} +.input-group > .form-control:focus, +.input-group > .custom-select:focus, +.input-group > .custom-file:focus { + z-index: 3; +} +.input-group > .form-control + .form-control, +.input-group > .form-control + .custom-select, +.input-group > .form-control + .custom-file, +.input-group > .custom-select + .form-control, +.input-group > .custom-select + .custom-select, +.input-group > .custom-select + .custom-file, +.input-group > .custom-file + .form-control, +.input-group > .custom-file + 
.custom-select, +.input-group > .custom-file + .custom-file { + margin-left: -1px; +} +.input-group > .form-control:not(:last-child), +.input-group > .custom-select:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .form-control:not(:first-child), +.input-group > .custom-select:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.input-group > .custom-file { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.input-group > .custom-file:not(:last-child) .custom-file-label, .input-group > .custom-file:not(:last-child) .custom-file-label::before { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .custom-file:not(:first-child) .custom-file-label, .input-group > .custom-file:not(:first-child) .custom-file-label::before { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.input-group-prepend, +.input-group-append { + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +.input-group-prepend .btn, +.input-group-append .btn { + position: relative; + z-index: 2; +} +.input-group-prepend .btn + .btn, +.input-group-prepend .btn + .input-group-text, +.input-group-prepend .input-group-text + .input-group-text, +.input-group-prepend .input-group-text + .btn, +.input-group-append .btn + .btn, +.input-group-append .btn + .input-group-text, +.input-group-append .input-group-text + .input-group-text, +.input-group-append .input-group-text + .btn { + margin-left: -1px; +} + +.input-group-prepend { + margin-right: -1px; +} + +.input-group-append { + margin-left: -1px; +} + +.input-group-text { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding: 0.375rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + text-align: center; + white-space: nowrap; + background-color: #e9ecef; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.input-group-text input[type="radio"], +.input-group-text input[type="checkbox"] { + margin-top: 0; +} + +.input-group > .input-group-prepend > .btn, +.input-group > .input-group-prepend > .input-group-text, +.input-group > .input-group-append:not(:last-child) > .btn, +.input-group > .input-group-append:not(:last-child) > .input-group-text, +.input-group > .input-group-append:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group > .input-group-append:last-child > .input-group-text:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .input-group-append > .btn, +.input-group > .input-group-append > .input-group-text, +.input-group > .input-group-prepend:not(:first-child) > .btn, +.input-group > .input-group-prepend:not(:first-child) > .input-group-text, +.input-group > .input-group-prepend:first-child > .btn:not(:first-child), +.input-group > .input-group-prepend:first-child > .input-group-text:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.custom-control { + position: relative; + display: block; + min-height: 1.5rem; + padding-left: 1.5rem; +} + +.custom-control-inline { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + margin-right: 1rem; +} + +.custom-control-input { + position: absolute; + z-index: -1; + opacity: 0; +} 
+.custom-control-input:checked ~ .custom-control-label::before { + color: #fff; + background-color: #007bff; +} +.custom-control-input:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-control-input:active ~ .custom-control-label::before { + color: #fff; + background-color: #b3d7ff; +} +.custom-control-input:disabled ~ .custom-control-label { + color: #6c757d; +} +.custom-control-input:disabled ~ .custom-control-label::before { + background-color: #e9ecef; +} + +.custom-control-label { + margin-bottom: 0; +} +.custom-control-label::before { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + pointer-events: none; + content: ""; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + background-color: #dee2e6; +} +.custom-control-label::after { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + content: ""; + background-repeat: no-repeat; + background-position: center center; + background-size: 50% 50%; +} + +.custom-checkbox .custom-control-label::before { + border-radius: 0.25rem; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} +.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-radio .custom-control-label::before { + border-radius: 50%; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E"); +} +.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-select { + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + padding: 0.375rem 1.75rem 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + vertical-align: middle; + background: #fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center; + background-size: 8px 10px; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} +.custom-select:focus { + 
border-color: #80bdff; + outline: 0; + -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); + box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); +} +.custom-select:focus::-ms-value { + color: #495057; + background-color: #fff; +} +.custom-select[multiple], .custom-select[size]:not([size="1"]) { + height: auto; + padding-right: 0.75rem; + background-image: none; +} +.custom-select:disabled { + color: #6c757d; + background-color: #e9ecef; +} +.custom-select::-ms-expand { + opacity: 0; +} + +.custom-select-sm { + height: calc(1.8125rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 75%; +} + +.custom-select-lg { + height: calc(2.875rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 125%; +} + +.custom-file { + position: relative; + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + margin-bottom: 0; +} + +.custom-file-input { + position: relative; + z-index: 2; + width: 100%; + height: calc(2.25rem + 2px); + margin: 0; + opacity: 0; +} +.custom-file-input:focus ~ .custom-file-control { + border-color: #80bdff; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-file-input:focus ~ .custom-file-control::before { + border-color: #80bdff; +} +.custom-file-input:lang(en) ~ .custom-file-label::after { + content: "Browse"; +} + +.custom-file-label { + position: absolute; + top: 0; + right: 0; + left: 0; + z-index: 1; + height: calc(2.25rem + 2px); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.custom-file-label::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + z-index: 3; + display: block; + height: calc(calc(2.25rem + 2px) - 1px * 2); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + content: "Browse"; + background-color: #e9ecef; + border-left: 1px solid #ced4da; + border-radius: 0 0.25rem 0.25rem 0; +} + +.nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} + +.nav-link { + display: block; + padding: 0.5rem 1rem; +} +.nav-link:hover, .nav-link:focus { + text-decoration: none; +} +.nav-link.disabled { + color: #6c757d; +} + +.nav-tabs { + border-bottom: 1px solid #dee2e6; +} +.nav-tabs .nav-item { + margin-bottom: -1px; +} +.nav-tabs .nav-link { + border: 1px solid transparent; + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus { + border-color: #e9ecef #e9ecef #dee2e6; +} +.nav-tabs .nav-link.disabled { + color: #6c757d; + background-color: transparent; + border-color: transparent; +} +.nav-tabs .nav-link.active, +.nav-tabs .nav-item.show .nav-link { + color: #495057; + background-color: #fff; + border-color: #dee2e6 #dee2e6 #fff; +} +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.nav-pills .nav-link { + border-radius: 0.25rem; +} +.nav-pills .nav-link.active, +.nav-pills .show > .nav-link { + color: #fff; + background-color: #007bff; +} + +.nav-fill .nav-item { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + text-align: center; +} + +.nav-justified .nav-item { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + 
flex-grow: 1; + text-align: center; +} + +.tab-content > .tab-pane { + display: none; +} +.tab-content > .active { + display: block; +} + +.navbar { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 0.5rem 1rem; +} +.navbar > .container, +.navbar > .container-fluid { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; +} + +.navbar-brand { + display: inline-block; + padding-top: 0.3125rem; + padding-bottom: 0.3125rem; + margin-right: 1rem; + font-size: 1.25rem; + line-height: inherit; + white-space: nowrap; +} +.navbar-brand:hover, .navbar-brand:focus { + text-decoration: none; +} + +.navbar-nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} +.navbar-nav .nav-link { + padding-right: 0; + padding-left: 0; +} +.navbar-nav .dropdown-menu { + position: static; + float: none; +} + +.navbar-text { + display: inline-block; + padding-top: 0.5rem; + padding-bottom: 0.5rem; +} + +.navbar-collapse { + -ms-flex-preferred-size: 100%; + flex-basis: 100%; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} + +.navbar-toggler { + padding: 0.25rem 0.75rem; + font-size: 1.25rem; + line-height: 1; + background-color: transparent; + border: 1px solid transparent; + border-radius: 0.25rem; +} +.navbar-toggler:hover, .navbar-toggler:focus { + text-decoration: none; +} +.navbar-toggler:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.navbar-toggler-icon { + display: inline-block; + width: 1.5em; + height: 1.5em; + vertical-align: middle; + content: ""; + background: no-repeat center center; + background-size: 100% 100%; +} + +@media (max-width: 575.98px) { + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 576px) { + .navbar-expand-sm { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-sm .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-sm .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-sm .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-sm .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-sm .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-sm .navbar-toggler { + display: none; + } + .navbar-expand-sm .dropup .dropdown-menu 
{ + top: auto; + bottom: 100%; + } +} +@media (max-width: 767.98px) { + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 768px) { + .navbar-expand-md { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-md .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-md .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-md .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-md .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-md .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-md .navbar-toggler { + display: none; + } + .navbar-expand-md .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 991.98px) { + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 992px) { + .navbar-expand-lg { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-lg .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-lg .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-lg .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-lg .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-lg .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-lg .navbar-toggler { + display: none; + } + .navbar-expand-lg .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 1199.98px) { + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 1200px) { + .navbar-expand-xl { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-xl .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-xl .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-xl .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-xl .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + 
.navbar-expand-xl .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-xl .navbar-toggler { + display: none; + } + .navbar-expand-xl .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +.navbar-expand { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + padding-right: 0; + padding-left: 0; +} +.navbar-expand .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; +} +.navbar-expand .navbar-nav .dropdown-menu { + position: absolute; +} +.navbar-expand .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; +} +.navbar-expand .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; +} +.navbar-expand .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; +} +.navbar-expand .navbar-toggler { + display: none; +} +.navbar-expand .dropup .dropdown-menu { + top: auto; + bottom: 100%; +} + +.navbar-light .navbar-brand { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-brand:hover, .navbar-light .navbar-brand:focus { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-nav .nav-link { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-nav .nav-link:hover, .navbar-light .navbar-nav .nav-link:focus { + color: rgba(0, 0, 0, 0.7); +} +.navbar-light .navbar-nav .nav-link.disabled { + color: rgba(0, 0, 0, 0.3); +} +.navbar-light .navbar-nav .show > .nav-link, +.navbar-light .navbar-nav .active > .nav-link, +.navbar-light .navbar-nav .nav-link.show, +.navbar-light .navbar-nav .nav-link.active { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-toggler { + color: rgba(0, 0, 0, 0.5); + border-color: rgba(0, 0, 0, 0.1); +} +.navbar-light .navbar-toggler-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-light .navbar-text { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-text a { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-text a:hover, .navbar-light .navbar-text a:focus { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-dark .navbar-brand { + color: #fff; +} +.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus { + color: #fff; +} +.navbar-dark .navbar-nav .nav-link { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-nav .nav-link:hover, .navbar-dark .navbar-nav .nav-link:focus { + color: rgba(255, 255, 255, 0.75); +} +.navbar-dark .navbar-nav .nav-link.disabled { + color: rgba(255, 255, 255, 0.25); +} +.navbar-dark .navbar-nav .show > .nav-link, +.navbar-dark .navbar-nav .active > .nav-link, +.navbar-dark .navbar-nav .nav-link.show, +.navbar-dark .navbar-nav .nav-link.active { + color: #fff; +} +.navbar-dark .navbar-toggler { + color: rgba(255, 255, 255, 0.5); + border-color: rgba(255, 255, 255, 0.1); +} +.navbar-dark .navbar-toggler-icon { 
+ background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-dark .navbar-text { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-text a { + color: #fff; +} +.navbar-dark .navbar-text a:hover, .navbar-dark .navbar-text a:focus { + color: #fff; +} + +.card { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + min-width: 0; + word-wrap: break-word; + background-color: #fff; + background-clip: border-box; + border: 1px solid rgba(0, 0, 0, 0.125); + border-radius: 0.25rem; +} +.card > hr { + margin-right: 0; + margin-left: 0; +} +.card > .list-group:first-child .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.card > .list-group:last-child .list-group-item:last-child { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.card-body { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1.25rem; +} + +.card-title { + margin-bottom: 0.75rem; +} + +.card-subtitle { + margin-top: -0.375rem; + margin-bottom: 0; +} + +.card-text:last-child { + margin-bottom: 0; +} + +.card-link:hover { + text-decoration: none; +} +.card-link + .card-link { + margin-left: 1.25rem; +} + +.card-header { + padding: 0.75rem 1.25rem; + margin-bottom: 0; + background-color: rgba(0, 0, 0, 0.03); + border-bottom: 1px solid rgba(0, 0, 0, 0.125); +} +.card-header:first-child { + border-radius: calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0; +} +.card-header + .list-group .list-group-item:first-child { + border-top: 0; +} + +.card-footer { + padding: 0.75rem 1.25rem; + background-color: rgba(0, 0, 0, 0.03); + border-top: 1px solid rgba(0, 0, 0, 0.125); +} +.card-footer:last-child { + border-radius: 0 0 calc(0.25rem - 1px) calc(0.25rem - 1px); +} + +.card-header-tabs { + margin-right: -0.625rem; + margin-bottom: -0.75rem; + margin-left: -0.625rem; + border-bottom: 0; +} + +.card-header-pills { + margin-right: -0.625rem; + margin-left: -0.625rem; +} + +.card-img-overlay { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + padding: 1.25rem; +} + +.card-img { + width: 100%; + border-radius: calc(0.25rem - 1px); +} + +.card-img-top { + width: 100%; + border-top-left-radius: calc(0.25rem - 1px); + border-top-right-radius: calc(0.25rem - 1px); +} + +.card-img-bottom { + width: 100%; + border-bottom-right-radius: calc(0.25rem - 1px); + border-bottom-left-radius: calc(0.25rem - 1px); +} + +.card-deck { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-deck .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-deck { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + margin-right: -15px; + margin-left: -15px; + } + .card-deck .card { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 1; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + margin-right: 15px; + 
margin-bottom: 0; + margin-left: 15px; + } +} + +.card-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-group > .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-group { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + } + .card-group > .card { + -webkit-box-flex: 1; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + margin-bottom: 0; + } + .card-group > .card + .card { + margin-left: 0; + border-left: 0; + } + .card-group > .card:first-child { + border-top-right-radius: 0; + border-bottom-right-radius: 0; + } + .card-group > .card:first-child .card-img-top, + .card-group > .card:first-child .card-header { + border-top-right-radius: 0; + } + .card-group > .card:first-child .card-img-bottom, + .card-group > .card:first-child .card-footer { + border-bottom-right-radius: 0; + } + .card-group > .card:last-child { + border-top-left-radius: 0; + border-bottom-left-radius: 0; + } + .card-group > .card:last-child .card-img-top, + .card-group > .card:last-child .card-header { + border-top-left-radius: 0; + } + .card-group > .card:last-child .card-img-bottom, + .card-group > .card:last-child .card-footer { + border-bottom-left-radius: 0; + } + .card-group > .card:only-child { + border-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-top, + .card-group > .card:only-child .card-header { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-bottom, + .card-group > .card:only-child .card-footer { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) { + border-radius: 0; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-top, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-header, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-footer { + border-radius: 0; + } +} + +.card-columns .card { + margin-bottom: 0.75rem; +} +@media (min-width: 576px) { + .card-columns { + -webkit-column-count: 3; + column-count: 3; + -webkit-column-gap: 1.25rem; + column-gap: 1.25rem; + } + .card-columns .card { + display: inline-block; + width: 100%; + } +} + +.breadcrumb { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding: 0.75rem 1rem; + margin-bottom: 1rem; + list-style: none; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.breadcrumb-item + .breadcrumb-item::before { + display: inline-block; + padding-right: 0.5rem; + padding-left: 0.5rem; + color: #6c757d; + content: "/"; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: underline; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: none; +} +.breadcrumb-item.active { + color: #6c757d; +} + +.pagination { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + padding-left: 0; + list-style: none; + border-radius: 0.25rem; +} + +.page-link { + position: relative; + display: block; + padding: 0.5rem 0.75rem; + margin-left: -1px; + line-height: 1.25; + color: #007bff; + background-color: #fff; + border: 1px solid #dee2e6; +} +.page-link:hover 
{ + color: #0056b3; + text-decoration: none; + background-color: #e9ecef; + border-color: #dee2e6; +} +.page-link:focus { + z-index: 2; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.page-link:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.page-item:first-child .page-link { + margin-left: 0; + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.page-item:last-child .page-link { + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; +} +.page-item.active .page-link { + z-index: 1; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.page-item.disabled .page-link { + color: #6c757d; + pointer-events: none; + cursor: auto; + background-color: #fff; + border-color: #dee2e6; +} + +.pagination-lg .page-link { + padding: 0.75rem 1.5rem; + font-size: 1.25rem; + line-height: 1.5; +} +.pagination-lg .page-item:first-child .page-link { + border-top-left-radius: 0.3rem; + border-bottom-left-radius: 0.3rem; +} +.pagination-lg .page-item:last-child .page-link { + border-top-right-radius: 0.3rem; + border-bottom-right-radius: 0.3rem; +} + +.pagination-sm .page-link { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; +} +.pagination-sm .page-item:first-child .page-link { + border-top-left-radius: 0.2rem; + border-bottom-left-radius: 0.2rem; +} +.pagination-sm .page-item:last-child .page-link { + border-top-right-radius: 0.2rem; + border-bottom-right-radius: 0.2rem; +} + +.badge { + display: inline-block; + padding: 0.25em 0.4em; + font-size: 75%; + font-weight: 700; + line-height: 1; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: 0.25rem; +} +.badge:empty { + display: none; +} + +.btn .badge { + position: relative; + top: -1px; +} + +.badge-pill { + padding-right: 0.6em; + padding-left: 0.6em; + border-radius: 10rem; +} + +.badge-primary { + color: #fff; + background-color: #007bff; +} +.badge-primary[href]:hover, .badge-primary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #0062cc; +} + +.badge-secondary { + color: #fff; + background-color: #6c757d; +} +.badge-secondary[href]:hover, .badge-secondary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #545b62; +} + +.badge-success { + color: #fff; + background-color: #28a745; +} +.badge-success[href]:hover, .badge-success[href]:focus { + color: #fff; + text-decoration: none; + background-color: #1e7e34; +} + +.badge-info { + color: #fff; + background-color: #17a2b8; +} +.badge-info[href]:hover, .badge-info[href]:focus { + color: #fff; + text-decoration: none; + background-color: #117a8b; +} + +.badge-warning { + color: #212529; + background-color: #ffc107; +} +.badge-warning[href]:hover, .badge-warning[href]:focus { + color: #212529; + text-decoration: none; + background-color: #d39e00; +} + +.badge-danger { + color: #fff; + background-color: #dc3545; +} +.badge-danger[href]:hover, .badge-danger[href]:focus { + color: #fff; + text-decoration: none; + background-color: #bd2130; +} + +.badge-light { + color: #212529; + background-color: #f8f9fa; +} +.badge-light[href]:hover, .badge-light[href]:focus { + color: #212529; + text-decoration: none; + background-color: #dae0e5; +} + +.badge-dark { + color: #fff; + background-color: #343a40; +} +.badge-dark[href]:hover, .badge-dark[href]:focus { + color: #fff; + text-decoration: none; + background-color: #1d2124; +} + +.jumbotron { + padding: 
2rem 1rem; + margin-bottom: 2rem; + background-color: #e9ecef; + border-radius: 0.3rem; +} +@media (min-width: 576px) { + .jumbotron { + padding: 4rem 2rem; + } +} + +.jumbotron-fluid { + padding-right: 0; + padding-left: 0; + border-radius: 0; +} + +.alert { + position: relative; + padding: 0.75rem 1.25rem; + margin-bottom: 1rem; + border: 1px solid transparent; + border-radius: 0.25rem; +} + +.alert-heading { + color: inherit; +} + +.alert-link { + font-weight: 700; +} + +.alert-dismissible { + padding-right: 4rem; +} +.alert-dismissible .close { + position: absolute; + top: 0; + right: 0; + padding: 0.75rem 1.25rem; + color: inherit; +} + +.alert-primary { + color: #004085; + background-color: #cce5ff; + border-color: #b8daff; +} +.alert-primary hr { + border-top-color: #9fcdff; +} +.alert-primary .alert-link { + color: #002752; +} + +.alert-secondary { + color: #383d41; + background-color: #e2e3e5; + border-color: #d6d8db; +} +.alert-secondary hr { + border-top-color: #c8cbcf; +} +.alert-secondary .alert-link { + color: #202326; +} + +.alert-success { + color: #155724; + background-color: #d4edda; + border-color: #c3e6cb; +} +.alert-success hr { + border-top-color: #b1dfbb; +} +.alert-success .alert-link { + color: #0b2e13; +} + +.alert-info { + color: #0c5460; + background-color: #d1ecf1; + border-color: #bee5eb; +} +.alert-info hr { + border-top-color: #abdde5; +} +.alert-info .alert-link { + color: #062c33; +} + +.alert-warning { + color: #856404; + background-color: #fff3cd; + border-color: #ffeeba; +} +.alert-warning hr { + border-top-color: #ffe8a1; +} +.alert-warning .alert-link { + color: #533f03; +} + +.alert-danger { + color: #721c24; + background-color: #f8d7da; + border-color: #f5c6cb; +} +.alert-danger hr { + border-top-color: #f1b0b7; +} +.alert-danger .alert-link { + color: #491217; +} + +.alert-light { + color: #818182; + background-color: #fefefe; + border-color: #fdfdfe; +} +.alert-light hr { + border-top-color: #ececf6; +} +.alert-light .alert-link { + color: #686868; +} + +.alert-dark { + color: #1b1e21; + background-color: #d6d8d9; + border-color: #c6c8ca; +} +.alert-dark hr { + border-top-color: #b9bbbe; +} +.alert-dark .alert-link { + color: #040505; +} + +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} + +@keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} +.progress { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 1rem; + overflow: hidden; + font-size: 0.75rem; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.progress-bar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + color: #fff; + text-align: center; + background-color: #007bff; + -webkit-transition: width 0.6s ease; + transition: width 0.6s ease; +} + +.progress-bar-striped { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; +} + +.progress-bar-animated { + -webkit-animation: progress-bar-stripes 1s linear infinite; + animation: progress-bar-stripes 1s linear infinite; +} + +.media { + display: -webkit-box; + display: 
-ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; +} + +.media-body { + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1; +} + +.list-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; +} + +.list-group-item-action { + width: 100%; + color: #495057; + text-align: inherit; +} +.list-group-item-action:hover, .list-group-item-action:focus { + color: #495057; + text-decoration: none; + background-color: #f8f9fa; +} +.list-group-item-action:active { + color: #212529; + background-color: #e9ecef; +} + +.list-group-item { + position: relative; + display: block; + padding: 0.75rem 1.25rem; + margin-bottom: -1px; + background-color: #fff; + border: 1px solid rgba(0, 0, 0, 0.125); +} +.list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.list-group-item:hover, .list-group-item:focus { + z-index: 1; + text-decoration: none; +} +.list-group-item.disabled, .list-group-item:disabled { + color: #6c757d; + background-color: #fff; +} +.list-group-item.active { + z-index: 2; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.list-group-flush .list-group-item { + border-right: 0; + border-left: 0; + border-radius: 0; +} +.list-group-flush:first-child .list-group-item:first-child { + border-top: 0; +} +.list-group-flush:last-child .list-group-item:last-child { + border-bottom: 0; +} + +.list-group-item-primary { + color: #004085; + background-color: #b8daff; +} +.list-group-item-primary.list-group-item-action:hover, .list-group-item-primary.list-group-item-action:focus { + color: #004085; + background-color: #9fcdff; +} +.list-group-item-primary.list-group-item-action.active { + color: #fff; + background-color: #004085; + border-color: #004085; +} + +.list-group-item-secondary { + color: #383d41; + background-color: #d6d8db; +} +.list-group-item-secondary.list-group-item-action:hover, .list-group-item-secondary.list-group-item-action:focus { + color: #383d41; + background-color: #c8cbcf; +} +.list-group-item-secondary.list-group-item-action.active { + color: #fff; + background-color: #383d41; + border-color: #383d41; +} + +.list-group-item-success { + color: #155724; + background-color: #c3e6cb; +} +.list-group-item-success.list-group-item-action:hover, .list-group-item-success.list-group-item-action:focus { + color: #155724; + background-color: #b1dfbb; +} +.list-group-item-success.list-group-item-action.active { + color: #fff; + background-color: #155724; + border-color: #155724; +} + +.list-group-item-info { + color: #0c5460; + background-color: #bee5eb; +} +.list-group-item-info.list-group-item-action:hover, .list-group-item-info.list-group-item-action:focus { + color: #0c5460; + background-color: #abdde5; +} +.list-group-item-info.list-group-item-action.active { + color: #fff; + background-color: #0c5460; + border-color: #0c5460; +} + +.list-group-item-warning { + color: #856404; + background-color: #ffeeba; +} +.list-group-item-warning.list-group-item-action:hover, .list-group-item-warning.list-group-item-action:focus { + color: #856404; + background-color: #ffe8a1; +} +.list-group-item-warning.list-group-item-action.active { + color: #fff; + background-color: 
#856404; + border-color: #856404; +} + +.list-group-item-danger { + color: #721c24; + background-color: #f5c6cb; +} +.list-group-item-danger.list-group-item-action:hover, .list-group-item-danger.list-group-item-action:focus { + color: #721c24; + background-color: #f1b0b7; +} +.list-group-item-danger.list-group-item-action.active { + color: #fff; + background-color: #721c24; + border-color: #721c24; +} + +.list-group-item-light { + color: #818182; + background-color: #fdfdfe; +} +.list-group-item-light.list-group-item-action:hover, .list-group-item-light.list-group-item-action:focus { + color: #818182; + background-color: #ececf6; +} +.list-group-item-light.list-group-item-action.active { + color: #fff; + background-color: #818182; + border-color: #818182; +} + +.list-group-item-dark { + color: #1b1e21; + background-color: #c6c8ca; +} +.list-group-item-dark.list-group-item-action:hover, .list-group-item-dark.list-group-item-action:focus { + color: #1b1e21; + background-color: #b9bbbe; +} +.list-group-item-dark.list-group-item-action.active { + color: #fff; + background-color: #1b1e21; + border-color: #1b1e21; +} + +.close { + float: right; + font-size: 1.5rem; + font-weight: 700; + line-height: 1; + color: #000; + text-shadow: 0 1px 0 #fff; + opacity: .5; +} +.close:hover, .close:focus { + color: #000; + text-decoration: none; + opacity: .75; +} +.close:not(:disabled):not(.disabled) { + cursor: pointer; +} + +button.close { + padding: 0; + background-color: transparent; + border: 0; + -webkit-appearance: none; +} + +.modal-open { + overflow: hidden; +} + +.modal { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1050; + display: none; + overflow: hidden; + outline: 0; +} +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} + +.modal-dialog { + position: relative; + width: auto; + margin: 0.5rem; + pointer-events: none; +} +.modal.fade .modal-dialog { + -webkit-transition: -webkit-transform 0.3s ease-out; + transition: -webkit-transform 0.3s ease-out; + transition: transform 0.3s ease-out; + transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out; + -webkit-transform: translate(0, -25%); + transform: translate(0, -25%); +} +.modal.show .modal-dialog { + -webkit-transform: translate(0, 0); + transform: translate(0, 0); +} + +.modal-dialog-centered { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + min-height: calc(100% - (0.5rem * 2)); +} + +.modal-content { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + width: 100%; + pointer-events: auto; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; + outline: 0; +} + +.modal-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1040; + background-color: #000; +} +.modal-backdrop.fade { + opacity: 0; +} +.modal-backdrop.show { + opacity: 0.5; +} + +.modal-header { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 1rem; + border-bottom: 1px solid #e9ecef; + border-top-left-radius: 0.3rem; + border-top-right-radius: 0.3rem; +} +.modal-header .close { + 
padding: 1rem; + margin: -1rem -1rem -1rem auto; +} + +.modal-title { + margin-bottom: 0; + line-height: 1.5; +} + +.modal-body { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1rem; +} + +.modal-footer { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + padding: 1rem; + border-top: 1px solid #e9ecef; +} +.modal-footer > :not(:first-child) { + margin-left: .25rem; +} +.modal-footer > :not(:last-child) { + margin-right: .25rem; +} + +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} + +@media (min-width: 576px) { + .modal-dialog { + max-width: 500px; + margin: 1.75rem auto; + } + + .modal-dialog-centered { + min-height: calc(100% - (1.75rem * 2)); + } + + .modal-sm { + max-width: 300px; + } +} +@media (min-width: 992px) { + .modal-lg { + max-width: 800px; + } +} +.tooltip { + position: absolute; + z-index: 1070; + display: block; + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + opacity: 0; +} +.tooltip.show { + opacity: 0.9; +} +.tooltip .arrow { + position: absolute; + display: block; + width: 0.8rem; + height: 0.4rem; +} +.tooltip .arrow::before { + position: absolute; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-tooltip-top, .bs-tooltip-auto[x-placement^="top"] { + padding: 0.4rem 0; +} +.bs-tooltip-top .arrow, .bs-tooltip-auto[x-placement^="top"] .arrow { + bottom: 0; +} +.bs-tooltip-top .arrow::before, .bs-tooltip-auto[x-placement^="top"] .arrow::before { + top: 0; + border-width: 0.4rem 0.4rem 0; + border-top-color: #000; +} + +.bs-tooltip-right, .bs-tooltip-auto[x-placement^="right"] { + padding: 0 0.4rem; +} +.bs-tooltip-right .arrow, .bs-tooltip-auto[x-placement^="right"] .arrow { + left: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-right .arrow::before, .bs-tooltip-auto[x-placement^="right"] .arrow::before { + right: 0; + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: #000; +} + +.bs-tooltip-bottom, .bs-tooltip-auto[x-placement^="bottom"] { + padding: 0.4rem 0; +} +.bs-tooltip-bottom .arrow, .bs-tooltip-auto[x-placement^="bottom"] .arrow { + top: 0; +} +.bs-tooltip-bottom .arrow::before, .bs-tooltip-auto[x-placement^="bottom"] .arrow::before { + bottom: 0; + border-width: 0 0.4rem 0.4rem; + border-bottom-color: #000; +} + +.bs-tooltip-left, .bs-tooltip-auto[x-placement^="left"] { + padding: 0 0.4rem; +} +.bs-tooltip-left .arrow, .bs-tooltip-auto[x-placement^="left"] .arrow { + right: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-left .arrow::before, .bs-tooltip-auto[x-placement^="left"] .arrow::before { + left: 0; + border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: #000; +} + +.tooltip-inner { + max-width: 200px; + padding: 0.25rem 0.5rem; + color: #fff; + text-align: center; + background-color: #000; + border-radius: 0.25rem; +} + +.popover { + position: absolute; + 
top: 0; + left: 0; + z-index: 1060; + display: block; + max-width: 276px; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; +} +.popover .arrow { + position: absolute; + display: block; + width: 1rem; + height: 0.5rem; + margin: 0 0.3rem; +} +.popover .arrow::before, .popover .arrow::after { + position: absolute; + display: block; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-popover-top, .bs-popover-auto[x-placement^="top"] { + margin-bottom: 0.5rem; +} +.bs-popover-top .arrow, .bs-popover-auto[x-placement^="top"] .arrow { + bottom: calc((0.5rem + 1px) * -1); +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before, +.bs-popover-top .arrow::after, +.bs-popover-auto[x-placement^="top"] .arrow::after { + border-width: 0.5rem 0.5rem 0; +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before { + bottom: 0; + border-top-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-top .arrow::after, .bs-popover-auto[x-placement^="top"] .arrow::after { + bottom: 1px; + border-top-color: #fff; +} + +.bs-popover-right, .bs-popover-auto[x-placement^="right"] { + margin-left: 0.5rem; +} +.bs-popover-right .arrow, .bs-popover-auto[x-placement^="right"] .arrow { + left: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before, +.bs-popover-right .arrow::after, +.bs-popover-auto[x-placement^="right"] .arrow::after { + border-width: 0.5rem 0.5rem 0.5rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before { + left: 0; + border-right-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-right .arrow::after, .bs-popover-auto[x-placement^="right"] .arrow::after { + left: 1px; + border-right-color: #fff; +} + +.bs-popover-bottom, .bs-popover-auto[x-placement^="bottom"] { + margin-top: 0.5rem; +} +.bs-popover-bottom .arrow, .bs-popover-auto[x-placement^="bottom"] .arrow { + top: calc((0.5rem + 1px) * -1); +} +.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before, +.bs-popover-bottom .arrow::after, +.bs-popover-auto[x-placement^="bottom"] .arrow::after { + border-width: 0 0.5rem 0.5rem 0.5rem; +} +.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before { + top: 0; + border-bottom-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-bottom .arrow::after, .bs-popover-auto[x-placement^="bottom"] .arrow::after { + top: 1px; + border-bottom-color: #fff; +} +.bs-popover-bottom .popover-header::before, .bs-popover-auto[x-placement^="bottom"] .popover-header::before { + position: absolute; + top: 0; + left: 50%; + display: block; + width: 1rem; + margin-left: -0.5rem; + content: ""; + border-bottom: 1px solid #f7f7f7; +} + +.bs-popover-left, .bs-popover-auto[x-placement^="left"] { + margin-right: 0.5rem; +} +.bs-popover-left .arrow, .bs-popover-auto[x-placement^="left"] 
.arrow { + right: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before, +.bs-popover-left .arrow::after, +.bs-popover-auto[x-placement^="left"] .arrow::after { + border-width: 0.5rem 0 0.5rem 0.5rem; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before { + right: 0; + border-left-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-left .arrow::after, .bs-popover-auto[x-placement^="left"] .arrow::after { + right: 1px; + border-left-color: #fff; +} + +.popover-header { + padding: 0.5rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + color: inherit; + background-color: #f7f7f7; + border-bottom: 1px solid #ebebeb; + border-top-left-radius: calc(0.3rem - 1px); + border-top-right-radius: calc(0.3rem - 1px); +} +.popover-header:empty { + display: none; +} + +.popover-body { + padding: 0.5rem 0.75rem; + color: #212529; +} + +.carousel { + position: relative; +} + +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; +} + +.carousel-item { + position: relative; + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + width: 100%; + -webkit-transition: -webkit-transform 0.6s ease; + transition: -webkit-transform 0.6s ease; + transition: transform 0.6s ease; + transition: transform 0.6s ease, -webkit-transform 0.6s ease; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-perspective: 1000px; + perspective: 1000px; +} + +.carousel-item.active, +.carousel-item-next, +.carousel-item-prev { + display: block; +} + +.carousel-item-next, +.carousel-item-prev { + position: absolute; + top: 0; +} + +.carousel-item-next.carousel-item-left, +.carousel-item-prev.carousel-item-right { + -webkit-transform: translateX(0); + transform: translateX(0); +} +@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)) { + .carousel-item-next.carousel-item-left, + .carousel-item-prev.carousel-item-right { + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); + } +} + +.carousel-item-next, +.active.carousel-item-right { + -webkit-transform: translateX(100%); + transform: translateX(100%); +} +@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)) { + .carousel-item-next, + .active.carousel-item-right { + -webkit-transform: translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0); + } +} + +.carousel-item-prev, +.active.carousel-item-left { + -webkit-transform: translateX(-100%); + transform: translateX(-100%); +} +@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)) { + .carousel-item-prev, + .active.carousel-item-left { + -webkit-transform: translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0); + } +} + +.carousel-control-prev, +.carousel-control-next { + position: absolute; + top: 0; + bottom: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: 15%; + color: #fff; + text-align: center; + opacity: 0.5; +} +.carousel-control-prev:hover, .carousel-control-prev:focus, +.carousel-control-next:hover, +.carousel-control-next:focus { + color: #fff; + text-decoration: none; + outline: 0; + opacity: .9; +} + +.carousel-control-prev { + left: 0; +} + +.carousel-control-next { + right: 0; +} + 
+.carousel-control-prev-icon, +.carousel-control-next-icon { + display: inline-block; + width: 20px; + height: 20px; + background: transparent no-repeat center center; + background-size: 100% 100%; +} + +.carousel-control-prev-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E"); +} + +.carousel-control-next-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E"); +} + +.carousel-indicators { + position: absolute; + right: 0; + bottom: 10px; + left: 0; + z-index: 15; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + padding-left: 0; + margin-right: 15%; + margin-left: 15%; + list-style: none; +} +.carousel-indicators li { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; + width: 30px; + height: 3px; + margin-right: 3px; + margin-left: 3px; + text-indent: -999px; + background-color: rgba(255, 255, 255, 0.5); +} +.carousel-indicators li::before { + position: absolute; + top: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators li::after { + position: absolute; + bottom: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators .active { + background-color: #fff; +} + +.carousel-caption { + position: absolute; + right: 15%; + bottom: 20px; + left: 15%; + z-index: 10; + padding-top: 20px; + padding-bottom: 20px; + color: #fff; + text-align: center; +} + +.align-baseline { + vertical-align: baseline !important; +} + +.align-top { + vertical-align: top !important; +} + +.align-middle { + vertical-align: middle !important; +} + +.align-bottom { + vertical-align: bottom !important; +} + +.align-text-bottom { + vertical-align: text-bottom !important; +} + +.align-text-top { + vertical-align: text-top !important; +} + +.bg-primary { + background-color: #007bff !important; +} + +a.bg-primary:hover, a.bg-primary:focus, +button.bg-primary:hover, +button.bg-primary:focus { + background-color: #0062cc !important; +} + +.bg-secondary { + background-color: #6c757d !important; +} + +a.bg-secondary:hover, a.bg-secondary:focus, +button.bg-secondary:hover, +button.bg-secondary:focus { + background-color: #545b62 !important; +} + +.bg-success { + background-color: #28a745 !important; +} + +a.bg-success:hover, a.bg-success:focus, +button.bg-success:hover, +button.bg-success:focus { + background-color: #1e7e34 !important; +} + +.bg-info { + background-color: #17a2b8 !important; +} + +a.bg-info:hover, a.bg-info:focus, +button.bg-info:hover, +button.bg-info:focus { + background-color: #117a8b !important; +} + +.bg-warning { + background-color: #ffc107 !important; +} + +a.bg-warning:hover, a.bg-warning:focus, +button.bg-warning:hover, +button.bg-warning:focus { + background-color: #d39e00 !important; +} + +.bg-danger { + background-color: #dc3545 !important; +} + +a.bg-danger:hover, a.bg-danger:focus, +button.bg-danger:hover, +button.bg-danger:focus { + background-color: #bd2130 !important; +} + +.bg-light { + background-color: #f8f9fa !important; +} + +a.bg-light:hover, a.bg-light:focus, +button.bg-light:hover, +button.bg-light:focus { + 
background-color: #dae0e5 !important; +} + +.bg-dark { + background-color: #343a40 !important; +} + +a.bg-dark:hover, a.bg-dark:focus, +button.bg-dark:hover, +button.bg-dark:focus { + background-color: #1d2124 !important; +} + +.bg-white { + background-color: #fff !important; +} + +.bg-transparent { + background-color: transparent !important; +} + +.border { + border: 1px solid #dee2e6 !important; +} + +.border-top { + border-top: 1px solid #dee2e6 !important; +} + +.border-right { + border-right: 1px solid #dee2e6 !important; +} + +.border-bottom { + border-bottom: 1px solid #dee2e6 !important; +} + +.border-left { + border-left: 1px solid #dee2e6 !important; +} + +.border-0 { + border: 0 !important; +} + +.border-top-0 { + border-top: 0 !important; +} + +.border-right-0 { + border-right: 0 !important; +} + +.border-bottom-0 { + border-bottom: 0 !important; +} + +.border-left-0 { + border-left: 0 !important; +} + +.border-primary { + border-color: #007bff !important; +} + +.border-secondary { + border-color: #6c757d !important; +} + +.border-success { + border-color: #28a745 !important; +} + +.border-info { + border-color: #17a2b8 !important; +} + +.border-warning { + border-color: #ffc107 !important; +} + +.border-danger { + border-color: #dc3545 !important; +} + +.border-light { + border-color: #f8f9fa !important; +} + +.border-dark { + border-color: #343a40 !important; +} + +.border-white { + border-color: #fff !important; +} + +.rounded { + border-radius: 0.25rem !important; +} + +.rounded-top { + border-top-left-radius: 0.25rem !important; + border-top-right-radius: 0.25rem !important; +} + +.rounded-right { + border-top-right-radius: 0.25rem !important; + border-bottom-right-radius: 0.25rem !important; +} + +.rounded-bottom { + border-bottom-right-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-left { + border-top-left-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-circle { + border-radius: 50% !important; +} + +.rounded-0 { + border-radius: 0 !important; +} + +.clearfix::after { + display: block; + clear: both; + content: ""; +} + +.d-none { + display: none !important; +} + +.d-inline { + display: inline !important; +} + +.d-inline-block { + display: inline-block !important; +} + +.d-block { + display: block !important; +} + +.d-table { + display: table !important; +} + +.d-table-row { + display: table-row !important; +} + +.d-table-cell { + display: table-cell !important; +} + +.d-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; +} + +.d-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; +} + +@media (min-width: 576px) { + .d-sm-none { + display: none !important; + } + + .d-sm-inline { + display: inline !important; + } + + .d-sm-inline-block { + display: inline-block !important; + } + + .d-sm-block { + display: block !important; + } + + .d-sm-table { + display: table !important; + } + + .d-sm-table-row { + display: table-row !important; + } + + .d-sm-table-cell { + display: table-cell !important; + } + + .d-sm-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-sm-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 768px) { + .d-md-none { + display: none !important; + } + + .d-md-inline { 
+ display: inline !important; + } + + .d-md-inline-block { + display: inline-block !important; + } + + .d-md-block { + display: block !important; + } + + .d-md-table { + display: table !important; + } + + .d-md-table-row { + display: table-row !important; + } + + .d-md-table-cell { + display: table-cell !important; + } + + .d-md-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-md-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 992px) { + .d-lg-none { + display: none !important; + } + + .d-lg-inline { + display: inline !important; + } + + .d-lg-inline-block { + display: inline-block !important; + } + + .d-lg-block { + display: block !important; + } + + .d-lg-table { + display: table !important; + } + + .d-lg-table-row { + display: table-row !important; + } + + .d-lg-table-cell { + display: table-cell !important; + } + + .d-lg-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-lg-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 1200px) { + .d-xl-none { + display: none !important; + } + + .d-xl-inline { + display: inline !important; + } + + .d-xl-inline-block { + display: inline-block !important; + } + + .d-xl-block { + display: block !important; + } + + .d-xl-table { + display: table !important; + } + + .d-xl-table-row { + display: table-row !important; + } + + .d-xl-table-cell { + display: table-cell !important; + } + + .d-xl-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-xl-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media print { + .d-print-none { + display: none !important; + } + + .d-print-inline { + display: inline !important; + } + + .d-print-inline-block { + display: inline-block !important; + } + + .d-print-block { + display: block !important; + } + + .d-print-table { + display: table !important; + } + + .d-print-table-row { + display: table-row !important; + } + + .d-print-table-cell { + display: table-cell !important; + } + + .d-print-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-print-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +.embed-responsive { + position: relative; + display: block; + width: 100%; + padding: 0; + overflow: hidden; +} +.embed-responsive::before { + display: block; + content: ""; +} +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; + border: 0; +} + +.embed-responsive-21by9::before { + padding-top: 42.8571428571%; +} + +.embed-responsive-16by9::before { + padding-top: 56.25%; +} + +.embed-responsive-4by3::before { + padding-top: 75%; +} + +.embed-responsive-1by1::before { + padding-top: 100%; +} + +.flex-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; +} + +.flex-column { + 
-webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; +} + +.flex-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; +} + +.flex-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; +} + +.flex-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; +} + +.flex-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; +} + +.flex-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; +} + +.justify-content-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; +} + +.justify-content-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; +} + +.justify-content-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; +} + +.justify-content-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; +} + +.justify-content-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; +} + +.align-items-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; +} + +.align-items-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; +} + +.align-items-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; +} + +.align-items-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; +} + +.align-items-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; +} + +.align-content-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; +} + +.align-content-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; +} + +.align-content-center { + -ms-flex-line-pack: center !important; + align-content: center !important; +} + +.align-content-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; +} + +.align-content-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; +} + +.align-content-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; +} + +.align-self-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; +} + +.align-self-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; +} + +.align-self-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; +} + +.align-self-center { + -ms-flex-item-align: center !important; + align-self: center !important; +} + +.align-self-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; +} + +.align-self-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; +} + +@media 
(min-width: 576px) { + .flex-sm-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-sm-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-sm-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-sm-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-sm-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-sm-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-sm-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-sm-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-sm-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-sm-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-sm-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-sm-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-sm-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-sm-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-sm-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-sm-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-sm-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-sm-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-sm-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-sm-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-sm-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-sm-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-sm-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-sm-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-sm-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-sm-end { + -ms-flex-item-align: end !important; + align-self: flex-end 
!important; + } + + .align-self-sm-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-sm-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-sm-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 768px) { + .flex-md-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-md-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-md-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-md-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-md-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-md-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-md-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-md-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-md-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-md-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-md-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-md-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-md-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-md-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-md-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-md-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-md-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-md-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-md-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-md-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-md-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-md-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-md-stretch { + -ms-flex-line-pack: 
stretch !important; + align-content: stretch !important; + } + + .align-self-md-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-md-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-md-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-md-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-md-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-md-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 992px) { + .flex-lg-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-lg-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-lg-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-lg-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-lg-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-lg-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-lg-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-lg-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-lg-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-lg-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-lg-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-lg-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-lg-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-lg-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-lg-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-lg-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-lg-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-lg-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-lg-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-lg-center { + -ms-flex-line-pack: 
center !important; + align-content: center !important; + } + + .align-content-lg-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-lg-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-lg-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-lg-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-lg-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-lg-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-lg-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-lg-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-lg-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 1200px) { + .flex-xl-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-xl-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-xl-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-xl-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-xl-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-xl-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-xl-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-xl-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-xl-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-xl-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-xl-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-xl-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-xl-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-xl-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-xl-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-xl-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-xl-stretch { + -webkit-box-align: stretch 
!important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-xl-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-xl-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-xl-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-xl-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-xl-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-xl-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-xl-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-xl-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-xl-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-xl-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-xl-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-xl-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +.float-left { + float: left !important; +} + +.float-right { + float: right !important; +} + +.float-none { + float: none !important; +} + +@media (min-width: 576px) { + .float-sm-left { + float: left !important; + } + + .float-sm-right { + float: right !important; + } + + .float-sm-none { + float: none !important; + } +} +@media (min-width: 768px) { + .float-md-left { + float: left !important; + } + + .float-md-right { + float: right !important; + } + + .float-md-none { + float: none !important; + } +} +@media (min-width: 992px) { + .float-lg-left { + float: left !important; + } + + .float-lg-right { + float: right !important; + } + + .float-lg-none { + float: none !important; + } +} +@media (min-width: 1200px) { + .float-xl-left { + float: left !important; + } + + .float-xl-right { + float: right !important; + } + + .float-xl-none { + float: none !important; + } +} +.position-static { + position: static !important; +} + +.position-relative { + position: relative !important; +} + +.position-absolute { + position: absolute !important; +} + +.position-fixed { + position: fixed !important; +} + +.position-sticky { + position: -webkit-sticky !important; + position: sticky !important; +} + +.fixed-top { + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: 1030; +} + +.fixed-bottom { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 1030; +} + +@supports ((position: -webkit-sticky) or (position: sticky)) { + .sticky-top { + position: -webkit-sticky; + position: sticky; + top: 0; + z-index: 1020; + } +} + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + -webkit-clip-path: inset(50%); + clip-path: inset(50%); + border: 0; +} + +.sr-only-focusable:active, .sr-only-focusable:focus { + position: static; + width: auto; + height: auto; + overflow: visible; + clip: auto; + white-space: normal; + -webkit-clip-path: none; + clip-path: none; +} + +.w-25 { + width: 25% !important; +} + +.w-50 { + width: 50% !important; +} + +.w-75 { + width: 75% !important; +} + +.w-100 { + width: 
100% !important; +} + +.h-25 { + height: 25% !important; +} + +.h-50 { + height: 50% !important; +} + +.h-75 { + height: 75% !important; +} + +.h-100 { + height: 100% !important; +} + +.mw-100 { + max-width: 100% !important; +} + +.mh-100 { + max-height: 100% !important; +} + +.m-0 { + margin: 0 !important; +} + +.mt-0, +.my-0 { + margin-top: 0 !important; +} + +.mr-0, +.mx-0 { + margin-right: 0 !important; +} + +.mb-0, +.my-0 { + margin-bottom: 0 !important; +} + +.ml-0, +.mx-0 { + margin-left: 0 !important; +} + +.m-1 { + margin: 0.25rem !important; +} + +.mt-1, +.my-1 { + margin-top: 0.25rem !important; +} + +.mr-1, +.mx-1 { + margin-right: 0.25rem !important; +} + +.mb-1, +.my-1 { + margin-bottom: 0.25rem !important; +} + +.ml-1, +.mx-1 { + margin-left: 0.25rem !important; +} + +.m-2 { + margin: 0.5rem !important; +} + +.mt-2, +.my-2 { + margin-top: 0.5rem !important; +} + +.mr-2, +.mx-2 { + margin-right: 0.5rem !important; +} + +.mb-2, +.my-2 { + margin-bottom: 0.5rem !important; +} + +.ml-2, +.mx-2 { + margin-left: 0.5rem !important; +} + +.m-3 { + margin: 1rem !important; +} + +.mt-3, +.my-3 { + margin-top: 1rem !important; +} + +.mr-3, +.mx-3 { + margin-right: 1rem !important; +} + +.mb-3, +.my-3 { + margin-bottom: 1rem !important; +} + +.ml-3, +.mx-3 { + margin-left: 1rem !important; +} + +.m-4 { + margin: 1.5rem !important; +} + +.mt-4, +.my-4 { + margin-top: 1.5rem !important; +} + +.mr-4, +.mx-4 { + margin-right: 1.5rem !important; +} + +.mb-4, +.my-4 { + margin-bottom: 1.5rem !important; +} + +.ml-4, +.mx-4 { + margin-left: 1.5rem !important; +} + +.m-5 { + margin: 3rem !important; +} + +.mt-5, +.my-5 { + margin-top: 3rem !important; +} + +.mr-5, +.mx-5 { + margin-right: 3rem !important; +} + +.mb-5, +.my-5 { + margin-bottom: 3rem !important; +} + +.ml-5, +.mx-5 { + margin-left: 3rem !important; +} + +.p-0 { + padding: 0 !important; +} + +.pt-0, +.py-0 { + padding-top: 0 !important; +} + +.pr-0, +.px-0 { + padding-right: 0 !important; +} + +.pb-0, +.py-0 { + padding-bottom: 0 !important; +} + +.pl-0, +.px-0 { + padding-left: 0 !important; +} + +.p-1 { + padding: 0.25rem !important; +} + +.pt-1, +.py-1 { + padding-top: 0.25rem !important; +} + +.pr-1, +.px-1 { + padding-right: 0.25rem !important; +} + +.pb-1, +.py-1 { + padding-bottom: 0.25rem !important; +} + +.pl-1, +.px-1 { + padding-left: 0.25rem !important; +} + +.p-2 { + padding: 0.5rem !important; +} + +.pt-2, +.py-2 { + padding-top: 0.5rem !important; +} + +.pr-2, +.px-2 { + padding-right: 0.5rem !important; +} + +.pb-2, +.py-2 { + padding-bottom: 0.5rem !important; +} + +.pl-2, +.px-2 { + padding-left: 0.5rem !important; +} + +.p-3 { + padding: 1rem !important; +} + +.pt-3, +.py-3 { + padding-top: 1rem !important; +} + +.pr-3, +.px-3 { + padding-right: 1rem !important; +} + +.pb-3, +.py-3 { + padding-bottom: 1rem !important; +} + +.pl-3, +.px-3 { + padding-left: 1rem !important; +} + +.p-4 { + padding: 1.5rem !important; +} + +.pt-4, +.py-4 { + padding-top: 1.5rem !important; +} + +.pr-4, +.px-4 { + padding-right: 1.5rem !important; +} + +.pb-4, +.py-4 { + padding-bottom: 1.5rem !important; +} + +.pl-4, +.px-4 { + padding-left: 1.5rem !important; +} + +.p-5 { + padding: 3rem !important; +} + +.pt-5, +.py-5 { + padding-top: 3rem !important; +} + +.pr-5, +.px-5 { + padding-right: 3rem !important; +} + +.pb-5, +.py-5 { + padding-bottom: 3rem !important; +} + +.pl-5, +.px-5 { + padding-left: 3rem !important; +} + +.m-auto { + margin: auto !important; +} + +.mt-auto, +.my-auto { + margin-top: auto !important; +} + 
+.mr-auto, +.mx-auto { + margin-right: auto !important; +} + +.mb-auto, +.my-auto { + margin-bottom: auto !important; +} + +.ml-auto, +.mx-auto { + margin-left: auto !important; +} + +@media (min-width: 576px) { + .m-sm-0 { + margin: 0 !important; + } + + .mt-sm-0, + .my-sm-0 { + margin-top: 0 !important; + } + + .mr-sm-0, + .mx-sm-0 { + margin-right: 0 !important; + } + + .mb-sm-0, + .my-sm-0 { + margin-bottom: 0 !important; + } + + .ml-sm-0, + .mx-sm-0 { + margin-left: 0 !important; + } + + .m-sm-1 { + margin: 0.25rem !important; + } + + .mt-sm-1, + .my-sm-1 { + margin-top: 0.25rem !important; + } + + .mr-sm-1, + .mx-sm-1 { + margin-right: 0.25rem !important; + } + + .mb-sm-1, + .my-sm-1 { + margin-bottom: 0.25rem !important; + } + + .ml-sm-1, + .mx-sm-1 { + margin-left: 0.25rem !important; + } + + .m-sm-2 { + margin: 0.5rem !important; + } + + .mt-sm-2, + .my-sm-2 { + margin-top: 0.5rem !important; + } + + .mr-sm-2, + .mx-sm-2 { + margin-right: 0.5rem !important; + } + + .mb-sm-2, + .my-sm-2 { + margin-bottom: 0.5rem !important; + } + + .ml-sm-2, + .mx-sm-2 { + margin-left: 0.5rem !important; + } + + .m-sm-3 { + margin: 1rem !important; + } + + .mt-sm-3, + .my-sm-3 { + margin-top: 1rem !important; + } + + .mr-sm-3, + .mx-sm-3 { + margin-right: 1rem !important; + } + + .mb-sm-3, + .my-sm-3 { + margin-bottom: 1rem !important; + } + + .ml-sm-3, + .mx-sm-3 { + margin-left: 1rem !important; + } + + .m-sm-4 { + margin: 1.5rem !important; + } + + .mt-sm-4, + .my-sm-4 { + margin-top: 1.5rem !important; + } + + .mr-sm-4, + .mx-sm-4 { + margin-right: 1.5rem !important; + } + + .mb-sm-4, + .my-sm-4 { + margin-bottom: 1.5rem !important; + } + + .ml-sm-4, + .mx-sm-4 { + margin-left: 1.5rem !important; + } + + .m-sm-5 { + margin: 3rem !important; + } + + .mt-sm-5, + .my-sm-5 { + margin-top: 3rem !important; + } + + .mr-sm-5, + .mx-sm-5 { + margin-right: 3rem !important; + } + + .mb-sm-5, + .my-sm-5 { + margin-bottom: 3rem !important; + } + + .ml-sm-5, + .mx-sm-5 { + margin-left: 3rem !important; + } + + .p-sm-0 { + padding: 0 !important; + } + + .pt-sm-0, + .py-sm-0 { + padding-top: 0 !important; + } + + .pr-sm-0, + .px-sm-0 { + padding-right: 0 !important; + } + + .pb-sm-0, + .py-sm-0 { + padding-bottom: 0 !important; + } + + .pl-sm-0, + .px-sm-0 { + padding-left: 0 !important; + } + + .p-sm-1 { + padding: 0.25rem !important; + } + + .pt-sm-1, + .py-sm-1 { + padding-top: 0.25rem !important; + } + + .pr-sm-1, + .px-sm-1 { + padding-right: 0.25rem !important; + } + + .pb-sm-1, + .py-sm-1 { + padding-bottom: 0.25rem !important; + } + + .pl-sm-1, + .px-sm-1 { + padding-left: 0.25rem !important; + } + + .p-sm-2 { + padding: 0.5rem !important; + } + + .pt-sm-2, + .py-sm-2 { + padding-top: 0.5rem !important; + } + + .pr-sm-2, + .px-sm-2 { + padding-right: 0.5rem !important; + } + + .pb-sm-2, + .py-sm-2 { + padding-bottom: 0.5rem !important; + } + + .pl-sm-2, + .px-sm-2 { + padding-left: 0.5rem !important; + } + + .p-sm-3 { + padding: 1rem !important; + } + + .pt-sm-3, + .py-sm-3 { + padding-top: 1rem !important; + } + + .pr-sm-3, + .px-sm-3 { + padding-right: 1rem !important; + } + + .pb-sm-3, + .py-sm-3 { + padding-bottom: 1rem !important; + } + + .pl-sm-3, + .px-sm-3 { + padding-left: 1rem !important; + } + + .p-sm-4 { + padding: 1.5rem !important; + } + + .pt-sm-4, + .py-sm-4 { + padding-top: 1.5rem !important; + } + + .pr-sm-4, + .px-sm-4 { + padding-right: 1.5rem !important; + } + + .pb-sm-4, + .py-sm-4 { + padding-bottom: 1.5rem !important; + } + + .pl-sm-4, + .px-sm-4 { + padding-left: 1.5rem 
!important; + } + + .p-sm-5 { + padding: 3rem !important; + } + + .pt-sm-5, + .py-sm-5 { + padding-top: 3rem !important; + } + + .pr-sm-5, + .px-sm-5 { + padding-right: 3rem !important; + } + + .pb-sm-5, + .py-sm-5 { + padding-bottom: 3rem !important; + } + + .pl-sm-5, + .px-sm-5 { + padding-left: 3rem !important; + } + + .m-sm-auto { + margin: auto !important; + } + + .mt-sm-auto, + .my-sm-auto { + margin-top: auto !important; + } + + .mr-sm-auto, + .mx-sm-auto { + margin-right: auto !important; + } + + .mb-sm-auto, + .my-sm-auto { + margin-bottom: auto !important; + } + + .ml-sm-auto, + .mx-sm-auto { + margin-left: auto !important; + } +} +@media (min-width: 768px) { + .m-md-0 { + margin: 0 !important; + } + + .mt-md-0, + .my-md-0 { + margin-top: 0 !important; + } + + .mr-md-0, + .mx-md-0 { + margin-right: 0 !important; + } + + .mb-md-0, + .my-md-0 { + margin-bottom: 0 !important; + } + + .ml-md-0, + .mx-md-0 { + margin-left: 0 !important; + } + + .m-md-1 { + margin: 0.25rem !important; + } + + .mt-md-1, + .my-md-1 { + margin-top: 0.25rem !important; + } + + .mr-md-1, + .mx-md-1 { + margin-right: 0.25rem !important; + } + + .mb-md-1, + .my-md-1 { + margin-bottom: 0.25rem !important; + } + + .ml-md-1, + .mx-md-1 { + margin-left: 0.25rem !important; + } + + .m-md-2 { + margin: 0.5rem !important; + } + + .mt-md-2, + .my-md-2 { + margin-top: 0.5rem !important; + } + + .mr-md-2, + .mx-md-2 { + margin-right: 0.5rem !important; + } + + .mb-md-2, + .my-md-2 { + margin-bottom: 0.5rem !important; + } + + .ml-md-2, + .mx-md-2 { + margin-left: 0.5rem !important; + } + + .m-md-3 { + margin: 1rem !important; + } + + .mt-md-3, + .my-md-3 { + margin-top: 1rem !important; + } + + .mr-md-3, + .mx-md-3 { + margin-right: 1rem !important; + } + + .mb-md-3, + .my-md-3 { + margin-bottom: 1rem !important; + } + + .ml-md-3, + .mx-md-3 { + margin-left: 1rem !important; + } + + .m-md-4 { + margin: 1.5rem !important; + } + + .mt-md-4, + .my-md-4 { + margin-top: 1.5rem !important; + } + + .mr-md-4, + .mx-md-4 { + margin-right: 1.5rem !important; + } + + .mb-md-4, + .my-md-4 { + margin-bottom: 1.5rem !important; + } + + .ml-md-4, + .mx-md-4 { + margin-left: 1.5rem !important; + } + + .m-md-5 { + margin: 3rem !important; + } + + .mt-md-5, + .my-md-5 { + margin-top: 3rem !important; + } + + .mr-md-5, + .mx-md-5 { + margin-right: 3rem !important; + } + + .mb-md-5, + .my-md-5 { + margin-bottom: 3rem !important; + } + + .ml-md-5, + .mx-md-5 { + margin-left: 3rem !important; + } + + .p-md-0 { + padding: 0 !important; + } + + .pt-md-0, + .py-md-0 { + padding-top: 0 !important; + } + + .pr-md-0, + .px-md-0 { + padding-right: 0 !important; + } + + .pb-md-0, + .py-md-0 { + padding-bottom: 0 !important; + } + + .pl-md-0, + .px-md-0 { + padding-left: 0 !important; + } + + .p-md-1 { + padding: 0.25rem !important; + } + + .pt-md-1, + .py-md-1 { + padding-top: 0.25rem !important; + } + + .pr-md-1, + .px-md-1 { + padding-right: 0.25rem !important; + } + + .pb-md-1, + .py-md-1 { + padding-bottom: 0.25rem !important; + } + + .pl-md-1, + .px-md-1 { + padding-left: 0.25rem !important; + } + + .p-md-2 { + padding: 0.5rem !important; + } + + .pt-md-2, + .py-md-2 { + padding-top: 0.5rem !important; + } + + .pr-md-2, + .px-md-2 { + padding-right: 0.5rem !important; + } + + .pb-md-2, + .py-md-2 { + padding-bottom: 0.5rem !important; + } + + .pl-md-2, + .px-md-2 { + padding-left: 0.5rem !important; + } + + .p-md-3 { + padding: 1rem !important; + } + + .pt-md-3, + .py-md-3 { + padding-top: 1rem !important; + } + + .pr-md-3, + .px-md-3 { + 
padding-right: 1rem !important; + } + + .pb-md-3, + .py-md-3 { + padding-bottom: 1rem !important; + } + + .pl-md-3, + .px-md-3 { + padding-left: 1rem !important; + } + + .p-md-4 { + padding: 1.5rem !important; + } + + .pt-md-4, + .py-md-4 { + padding-top: 1.5rem !important; + } + + .pr-md-4, + .px-md-4 { + padding-right: 1.5rem !important; + } + + .pb-md-4, + .py-md-4 { + padding-bottom: 1.5rem !important; + } + + .pl-md-4, + .px-md-4 { + padding-left: 1.5rem !important; + } + + .p-md-5 { + padding: 3rem !important; + } + + .pt-md-5, + .py-md-5 { + padding-top: 3rem !important; + } + + .pr-md-5, + .px-md-5 { + padding-right: 3rem !important; + } + + .pb-md-5, + .py-md-5 { + padding-bottom: 3rem !important; + } + + .pl-md-5, + .px-md-5 { + padding-left: 3rem !important; + } + + .m-md-auto { + margin: auto !important; + } + + .mt-md-auto, + .my-md-auto { + margin-top: auto !important; + } + + .mr-md-auto, + .mx-md-auto { + margin-right: auto !important; + } + + .mb-md-auto, + .my-md-auto { + margin-bottom: auto !important; + } + + .ml-md-auto, + .mx-md-auto { + margin-left: auto !important; + } +} +@media (min-width: 992px) { + .m-lg-0 { + margin: 0 !important; + } + + .mt-lg-0, + .my-lg-0 { + margin-top: 0 !important; + } + + .mr-lg-0, + .mx-lg-0 { + margin-right: 0 !important; + } + + .mb-lg-0, + .my-lg-0 { + margin-bottom: 0 !important; + } + + .ml-lg-0, + .mx-lg-0 { + margin-left: 0 !important; + } + + .m-lg-1 { + margin: 0.25rem !important; + } + + .mt-lg-1, + .my-lg-1 { + margin-top: 0.25rem !important; + } + + .mr-lg-1, + .mx-lg-1 { + margin-right: 0.25rem !important; + } + + .mb-lg-1, + .my-lg-1 { + margin-bottom: 0.25rem !important; + } + + .ml-lg-1, + .mx-lg-1 { + margin-left: 0.25rem !important; + } + + .m-lg-2 { + margin: 0.5rem !important; + } + + .mt-lg-2, + .my-lg-2 { + margin-top: 0.5rem !important; + } + + .mr-lg-2, + .mx-lg-2 { + margin-right: 0.5rem !important; + } + + .mb-lg-2, + .my-lg-2 { + margin-bottom: 0.5rem !important; + } + + .ml-lg-2, + .mx-lg-2 { + margin-left: 0.5rem !important; + } + + .m-lg-3 { + margin: 1rem !important; + } + + .mt-lg-3, + .my-lg-3 { + margin-top: 1rem !important; + } + + .mr-lg-3, + .mx-lg-3 { + margin-right: 1rem !important; + } + + .mb-lg-3, + .my-lg-3 { + margin-bottom: 1rem !important; + } + + .ml-lg-3, + .mx-lg-3 { + margin-left: 1rem !important; + } + + .m-lg-4 { + margin: 1.5rem !important; + } + + .mt-lg-4, + .my-lg-4 { + margin-top: 1.5rem !important; + } + + .mr-lg-4, + .mx-lg-4 { + margin-right: 1.5rem !important; + } + + .mb-lg-4, + .my-lg-4 { + margin-bottom: 1.5rem !important; + } + + .ml-lg-4, + .mx-lg-4 { + margin-left: 1.5rem !important; + } + + .m-lg-5 { + margin: 3rem !important; + } + + .mt-lg-5, + .my-lg-5 { + margin-top: 3rem !important; + } + + .mr-lg-5, + .mx-lg-5 { + margin-right: 3rem !important; + } + + .mb-lg-5, + .my-lg-5 { + margin-bottom: 3rem !important; + } + + .ml-lg-5, + .mx-lg-5 { + margin-left: 3rem !important; + } + + .p-lg-0 { + padding: 0 !important; + } + + .pt-lg-0, + .py-lg-0 { + padding-top: 0 !important; + } + + .pr-lg-0, + .px-lg-0 { + padding-right: 0 !important; + } + + .pb-lg-0, + .py-lg-0 { + padding-bottom: 0 !important; + } + + .pl-lg-0, + .px-lg-0 { + padding-left: 0 !important; + } + + .p-lg-1 { + padding: 0.25rem !important; + } + + .pt-lg-1, + .py-lg-1 { + padding-top: 0.25rem !important; + } + + .pr-lg-1, + .px-lg-1 { + padding-right: 0.25rem !important; + } + + .pb-lg-1, + .py-lg-1 { + padding-bottom: 0.25rem !important; + } + + .pl-lg-1, + .px-lg-1 { + padding-left: 0.25rem 
!important; + } + + .p-lg-2 { + padding: 0.5rem !important; + } + + .pt-lg-2, + .py-lg-2 { + padding-top: 0.5rem !important; + } + + .pr-lg-2, + .px-lg-2 { + padding-right: 0.5rem !important; + } + + .pb-lg-2, + .py-lg-2 { + padding-bottom: 0.5rem !important; + } + + .pl-lg-2, + .px-lg-2 { + padding-left: 0.5rem !important; + } + + .p-lg-3 { + padding: 1rem !important; + } + + .pt-lg-3, + .py-lg-3 { + padding-top: 1rem !important; + } + + .pr-lg-3, + .px-lg-3 { + padding-right: 1rem !important; + } + + .pb-lg-3, + .py-lg-3 { + padding-bottom: 1rem !important; + } + + .pl-lg-3, + .px-lg-3 { + padding-left: 1rem !important; + } + + .p-lg-4 { + padding: 1.5rem !important; + } + + .pt-lg-4, + .py-lg-4 { + padding-top: 1.5rem !important; + } + + .pr-lg-4, + .px-lg-4 { + padding-right: 1.5rem !important; + } + + .pb-lg-4, + .py-lg-4 { + padding-bottom: 1.5rem !important; + } + + .pl-lg-4, + .px-lg-4 { + padding-left: 1.5rem !important; + } + + .p-lg-5 { + padding: 3rem !important; + } + + .pt-lg-5, + .py-lg-5 { + padding-top: 3rem !important; + } + + .pr-lg-5, + .px-lg-5 { + padding-right: 3rem !important; + } + + .pb-lg-5, + .py-lg-5 { + padding-bottom: 3rem !important; + } + + .pl-lg-5, + .px-lg-5 { + padding-left: 3rem !important; + } + + .m-lg-auto { + margin: auto !important; + } + + .mt-lg-auto, + .my-lg-auto { + margin-top: auto !important; + } + + .mr-lg-auto, + .mx-lg-auto { + margin-right: auto !important; + } + + .mb-lg-auto, + .my-lg-auto { + margin-bottom: auto !important; + } + + .ml-lg-auto, + .mx-lg-auto { + margin-left: auto !important; + } +} +@media (min-width: 1200px) { + .m-xl-0 { + margin: 0 !important; + } + + .mt-xl-0, + .my-xl-0 { + margin-top: 0 !important; + } + + .mr-xl-0, + .mx-xl-0 { + margin-right: 0 !important; + } + + .mb-xl-0, + .my-xl-0 { + margin-bottom: 0 !important; + } + + .ml-xl-0, + .mx-xl-0 { + margin-left: 0 !important; + } + + .m-xl-1 { + margin: 0.25rem !important; + } + + .mt-xl-1, + .my-xl-1 { + margin-top: 0.25rem !important; + } + + .mr-xl-1, + .mx-xl-1 { + margin-right: 0.25rem !important; + } + + .mb-xl-1, + .my-xl-1 { + margin-bottom: 0.25rem !important; + } + + .ml-xl-1, + .mx-xl-1 { + margin-left: 0.25rem !important; + } + + .m-xl-2 { + margin: 0.5rem !important; + } + + .mt-xl-2, + .my-xl-2 { + margin-top: 0.5rem !important; + } + + .mr-xl-2, + .mx-xl-2 { + margin-right: 0.5rem !important; + } + + .mb-xl-2, + .my-xl-2 { + margin-bottom: 0.5rem !important; + } + + .ml-xl-2, + .mx-xl-2 { + margin-left: 0.5rem !important; + } + + .m-xl-3 { + margin: 1rem !important; + } + + .mt-xl-3, + .my-xl-3 { + margin-top: 1rem !important; + } + + .mr-xl-3, + .mx-xl-3 { + margin-right: 1rem !important; + } + + .mb-xl-3, + .my-xl-3 { + margin-bottom: 1rem !important; + } + + .ml-xl-3, + .mx-xl-3 { + margin-left: 1rem !important; + } + + .m-xl-4 { + margin: 1.5rem !important; + } + + .mt-xl-4, + .my-xl-4 { + margin-top: 1.5rem !important; + } + + .mr-xl-4, + .mx-xl-4 { + margin-right: 1.5rem !important; + } + + .mb-xl-4, + .my-xl-4 { + margin-bottom: 1.5rem !important; + } + + .ml-xl-4, + .mx-xl-4 { + margin-left: 1.5rem !important; + } + + .m-xl-5 { + margin: 3rem !important; + } + + .mt-xl-5, + .my-xl-5 { + margin-top: 3rem !important; + } + + .mr-xl-5, + .mx-xl-5 { + margin-right: 3rem !important; + } + + .mb-xl-5, + .my-xl-5 { + margin-bottom: 3rem !important; + } + + .ml-xl-5, + .mx-xl-5 { + margin-left: 3rem !important; + } + + .p-xl-0 { + padding: 0 !important; + } + + .pt-xl-0, + .py-xl-0 { + padding-top: 0 !important; + } + + .pr-xl-0, + .px-xl-0 { + 
padding-right: 0 !important; + } + + .pb-xl-0, + .py-xl-0 { + padding-bottom: 0 !important; + } + + .pl-xl-0, + .px-xl-0 { + padding-left: 0 !important; + } + + .p-xl-1 { + padding: 0.25rem !important; + } + + .pt-xl-1, + .py-xl-1 { + padding-top: 0.25rem !important; + } + + .pr-xl-1, + .px-xl-1 { + padding-right: 0.25rem !important; + } + + .pb-xl-1, + .py-xl-1 { + padding-bottom: 0.25rem !important; + } + + .pl-xl-1, + .px-xl-1 { + padding-left: 0.25rem !important; + } + + .p-xl-2 { + padding: 0.5rem !important; + } + + .pt-xl-2, + .py-xl-2 { + padding-top: 0.5rem !important; + } + + .pr-xl-2, + .px-xl-2 { + padding-right: 0.5rem !important; + } + + .pb-xl-2, + .py-xl-2 { + padding-bottom: 0.5rem !important; + } + + .pl-xl-2, + .px-xl-2 { + padding-left: 0.5rem !important; + } + + .p-xl-3 { + padding: 1rem !important; + } + + .pt-xl-3, + .py-xl-3 { + padding-top: 1rem !important; + } + + .pr-xl-3, + .px-xl-3 { + padding-right: 1rem !important; + } + + .pb-xl-3, + .py-xl-3 { + padding-bottom: 1rem !important; + } + + .pl-xl-3, + .px-xl-3 { + padding-left: 1rem !important; + } + + .p-xl-4 { + padding: 1.5rem !important; + } + + .pt-xl-4, + .py-xl-4 { + padding-top: 1.5rem !important; + } + + .pr-xl-4, + .px-xl-4 { + padding-right: 1.5rem !important; + } + + .pb-xl-4, + .py-xl-4 { + padding-bottom: 1.5rem !important; + } + + .pl-xl-4, + .px-xl-4 { + padding-left: 1.5rem !important; + } + + .p-xl-5 { + padding: 3rem !important; + } + + .pt-xl-5, + .py-xl-5 { + padding-top: 3rem !important; + } + + .pr-xl-5, + .px-xl-5 { + padding-right: 3rem !important; + } + + .pb-xl-5, + .py-xl-5 { + padding-bottom: 3rem !important; + } + + .pl-xl-5, + .px-xl-5 { + padding-left: 3rem !important; + } + + .m-xl-auto { + margin: auto !important; + } + + .mt-xl-auto, + .my-xl-auto { + margin-top: auto !important; + } + + .mr-xl-auto, + .mx-xl-auto { + margin-right: auto !important; + } + + .mb-xl-auto, + .my-xl-auto { + margin-bottom: auto !important; + } + + .ml-xl-auto, + .mx-xl-auto { + margin-left: auto !important; + } +} +.text-justify { + text-align: justify !important; +} + +.text-nowrap { + white-space: nowrap !important; +} + +.text-truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.text-left { + text-align: left !important; +} + +.text-right { + text-align: right !important; +} + +.text-center { + text-align: center !important; +} + +@media (min-width: 576px) { + .text-sm-left { + text-align: left !important; + } + + .text-sm-right { + text-align: right !important; + } + + .text-sm-center { + text-align: center !important; + } +} +@media (min-width: 768px) { + .text-md-left { + text-align: left !important; + } + + .text-md-right { + text-align: right !important; + } + + .text-md-center { + text-align: center !important; + } +} +@media (min-width: 992px) { + .text-lg-left { + text-align: left !important; + } + + .text-lg-right { + text-align: right !important; + } + + .text-lg-center { + text-align: center !important; + } +} +@media (min-width: 1200px) { + .text-xl-left { + text-align: left !important; + } + + .text-xl-right { + text-align: right !important; + } + + .text-xl-center { + text-align: center !important; + } +} +.text-lowercase { + text-transform: lowercase !important; +} + +.text-uppercase { + text-transform: uppercase !important; +} + +.text-capitalize { + text-transform: capitalize !important; +} + +.font-weight-light { + font-weight: 300 !important; +} + +.font-weight-normal { + font-weight: 400 !important; +} + +.font-weight-bold { + font-weight: 
700 !important; +} + +.font-italic { + font-style: italic !important; +} + +.text-white { + color: #fff !important; +} + +.text-primary { + color: #007bff !important; +} + +a.text-primary:hover, a.text-primary:focus { + color: #0062cc !important; +} + +.text-secondary { + color: #6c757d !important; +} + +a.text-secondary:hover, a.text-secondary:focus { + color: #545b62 !important; +} + +.text-success { + color: #28a745 !important; +} + +a.text-success:hover, a.text-success:focus { + color: #1e7e34 !important; +} + +.text-info { + color: #17a2b8 !important; +} + +a.text-info:hover, a.text-info:focus { + color: #117a8b !important; +} + +.text-warning { + color: #ffc107 !important; +} + +a.text-warning:hover, a.text-warning:focus { + color: #d39e00 !important; +} + +.text-danger { + color: #dc3545 !important; +} + +a.text-danger:hover, a.text-danger:focus { + color: #bd2130 !important; +} + +.text-light { + color: #f8f9fa !important; +} + +a.text-light:hover, a.text-light:focus { + color: #dae0e5 !important; +} + +.text-dark { + color: #343a40 !important; +} + +a.text-dark:hover, a.text-dark:focus { + color: #1d2124 !important; +} + +.text-muted { + color: #6c757d !important; +} + +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + background-color: transparent; + border: 0; +} + +.visible { + visibility: visible !important; +} + +.invisible { + visibility: hidden !important; +} + +@media print { + *, + *::before, + *::after { + text-shadow: none !important; + -webkit-box-shadow: none !important; + box-shadow: none !important; + } + + a:not(.btn) { + text-decoration: underline; + } + + abbr[title]::after { + content: " (" attr(title) ")"; + } + + pre { + white-space: pre-wrap !important; + } + + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; + } + + thead { + display: table-header-group; + } + + tr, + img { + page-break-inside: avoid; + } + + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + + h2, + h3 { + page-break-after: avoid; + } + + @page { + size: a3; + } + body { + min-width: 992px !important; + } + + .container { + min-width: 992px !important; + } + + .navbar { + display: none; + } + + .badge { + border: 1px solid #000; + } + + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + + .table-bordered th, + .table-bordered td { + border: 1px solid #ddd !important; + } +} +/*Github syntax highlighting theme via Rouge*/ +.highlight table td { + padding: 5px; +} + +.highlight table pre { + margin: 0; +} + +.highlight .cm { + color: #999988; + font-style: italic; +} + +.highlight .cp { + color: #999999; + font-weight: bold; +} + +.highlight .c1 { + color: #999988; + font-style: italic; +} + +.highlight .cs { + color: #999999; + font-weight: bold; + font-style: italic; +} + +.highlight .c, .highlight .cd { + color: #999988; + font-style: italic; +} + +.highlight .err { + color: #a61717; + background-color: #e3d2d2; +} + +.highlight .gd { + color: #000000; + background-color: #ffdddd; +} + +.highlight .ge { + color: #000000; + font-style: italic; +} + +.highlight .gr { + color: #aa0000; +} + +.highlight .gh { + color: #999999; +} + +.highlight .gi { + color: #000000; + background-color: #ddffdd; +} + +.highlight .go { + color: #888888; +} + +.highlight .gp { + color: #555555; +} + +.highlight .gs { + font-weight: bold; +} + +.highlight .gu { + color: #aaaaaa; +} + +.highlight .gt { + color: #aa0000; +} + +.highlight .kc { + color: #000000; + font-weight: bold; +} + +.highlight 
.kd { + color: #000000; + font-weight: bold; +} + +.highlight .kn { + color: #000000; + font-weight: bold; +} + +.highlight .kp { + color: #000000; + font-weight: bold; +} + +.highlight .kr { + color: #000000; + font-weight: bold; +} + +.highlight .kt { + color: #445588; + font-weight: bold; +} + +.highlight .k, .highlight .kv { + color: #000000; + font-weight: bold; +} + +.highlight .mf { + color: #009999; +} + +.highlight .mh { + color: #009999; +} + +.highlight .il { + color: #009999; +} + +.highlight .mi { + color: #009999; +} + +.highlight .mo { + color: #009999; +} + +.highlight .m, .highlight .mb, .highlight .mx { + color: #009999; +} + +.highlight .sb { + color: #d14; +} + +.highlight .sc { + color: #d14; +} + +.highlight .sd { + color: #d14; +} + +.highlight .s2 { + color: #d14; +} + +.highlight .se { + color: #d14; +} + +.highlight .sh { + color: #d14; +} + +.highlight .si { + color: #d14; +} + +.highlight .sx { + color: #d14; +} + +.highlight .sr { + color: #009926; +} + +.highlight .s1 { + color: #d14; +} + +.highlight .ss { + color: #990073; +} + +.highlight .s { + color: #d14; +} + +.highlight .na { + color: #008080; +} + +.highlight .bp { + color: #999999; +} + +.highlight .nb { + color: #0086B3; +} + +.highlight .nc { + color: #445588; + font-weight: bold; +} + +.highlight .no { + color: #008080; +} + +.highlight .nd { + color: #3c5d5d; + font-weight: bold; +} + +.highlight .ni { + color: #800080; +} + +.highlight .ne { + color: #990000; + font-weight: bold; +} + +.highlight .nf { + color: #990000; + font-weight: bold; +} + +.highlight .nl { + color: #990000; + font-weight: bold; +} + +.highlight .nn { + color: #555555; +} + +.highlight .nt { + color: #000080; +} + +.highlight .vc { + color: #008080; +} + +.highlight .vg { + color: #008080; +} + +.highlight .vi { + color: #008080; +} + +.highlight .nv { + color: #008080; +} + +.highlight .ow { + color: #000000; + font-weight: bold; +} + +.highlight .o { + color: #000000; + font-weight: bold; +} + +.highlight .w { + color: #bbbbbb; +} + +.highlight { + background-color: #f8f8f8; +} + +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-bold.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-medium.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-light.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light-italic.woff") format("woff"); +} +@font-face { + font-family: 
FreightSans; + font-weight: 400; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 400; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-book.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 600; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-SemiBold"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 500; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Medium"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 400; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Regular"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 300; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Light"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff") format("woff"); +} +html { + position: relative; + min-height: 100%; + font-size: 12px; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +* { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +body { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +a:link, +a:visited, +a:hover { + text-decoration: none; + color: #e44c2c; +} + +a.with-right-arrow, .btn.with-right-arrow { + padding-right: 1.375rem; + position: relative; + background-image: url("../images/chevron-right-orange.svg"); + background-size: 6px 13px; + background-position: center right 5px; + background-repeat: no-repeat; +} +@media screen and (min-width: 768px) { + a.with-right-arrow, .btn.with-right-arrow { + background-size: 8px 14px; + background-position: center right 12px; + padding-right: 2rem; + } +} + +::-webkit-input-placeholder { + color: #e44c2c; +} + +::-moz-placeholder { + color: #e44c2c; +} + +:-ms-input-placeholder { + color: #e44c2c; +} + +:-moz-placeholder { + color: #e44c2c; +} + +.email-subscribe-form input.email { + color: #e44c2c; + border: none; + border-bottom: 1px solid #939393; + width: 100%; + background-color: transparent; + outline: none; + font-size: 1.125rem; + letter-spacing: 0.25px; + line-height: 2.25rem; +} +.email-subscribe-form input[type="submit"] { + position: absolute; + right: 0; + top: 10px; + height: 15px; + width: 15px; + background-image: url("../images/arrow-right-with-tail.svg"); + background-color: transparent; + background-repeat: no-repeat; + background-size: 15px 15px; + background-position: center center; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + border: 0; +} + +.email-subscribe-form-fields-wrapper { + position: relative; +} + +.anchorjs-link { + color: #6c6c6d !important; +} +@media screen and (min-width: 768px) { + .anchorjs-link:hover { + color: inherit; + text-decoration: none !important; + } +} + 
+.pytorch-article #table-of-contents { + display: none; +} + +code, kbd, pre, samp { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} +code span, kbd span, pre span, samp span { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} + +pre { + padding: 1.125rem; + background-color: #f3f4f7; +} +pre code { + font-size: 0.875rem; +} +pre.highlight { + background-color: #f3f4f7; + line-height: 1.3125rem; +} + +code.highlighter-rouge { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 6px; +} + +a:link code.highlighter-rouge, +a:visited code.highlighter-rouge, +a:hover code.highlighter-rouge { + color: #4974D1; +} +a:link.has-code, +a:visited.has-code, +a:hover.has-code { + color: #4974D1; +} + +p code, +h1 code, +h2 code, +h3 code, +h4 code, +h5 code, +h6 code { + font-size: 78.5%; +} + +pre { + white-space: pre-wrap; + white-space: -moz-pre-wrap; + white-space: -pre-wrap; + white-space: -o-pre-wrap; + word-wrap: break-word; +} + +.header-holder { + height: 68px; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + left: 0; + margin-left: auto; + margin-right: auto; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 9999; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; +} +@media screen and (min-width: 1100px) { + .header-holder { + height: 90px; + } +} + +.header-container { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.header-container:before, .header-container:after { + content: ""; + display: table; +} +.header-container:after { + clear: both; +} +.header-container { + *zoom: 1; +} +@media screen and (min-width: 1100px) { + .header-container { + display: block; + } +} + +.header-logo { + height: 23px; + width: 93px; + background-image: url("../images/logo.svg"); + background-repeat: no-repeat; + background-size: 93px 23px; + display: block; + float: left; + z-index: 10; +} +@media screen and (min-width: 1100px) { + .header-logo { + background-size: 108px 27px; + position: absolute; + height: 27px; + width: 108px; + top: 4px; + float: none; + } +} + +.main-menu-open-button { + background-image: url("../images/icon-menu-dots.svg"); + background-position: center center; + background-size: 25px 7px; + background-repeat: no-repeat; + width: 25px; + height: 17px; + position: absolute; + right: 0; + top: 4px; +} +@media screen and (min-width: 1100px) { + .main-menu-open-button { + display: none; + } +} + +.header-holder .main-menu { + display: none; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} +.header-holder .main-menu ul { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin: 0; +} +.header-holder .main-menu ul li { + display: inline-block; + margin-right: 40px; + position: relative; +} +.header-holder .main-menu ul li.active:after { + content: "•"; + bottom: -24px; + color: #e44c2c; + font-size: 1.375rem; + left: 0; + position: absolute; + right: 0; + text-align: center; +} 
+.header-holder .main-menu ul li.active a { + color: #e44c2c; +} +.header-holder .main-menu ul li:last-of-type { + margin-right: 0; +} +.header-holder .main-menu ul li a { + color: #ffffff; + font-size: 1.125rem; + letter-spacing: 0; + line-height: 2.125rem; + text-align: center; + text-decoration: none; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu ul li a:hover { + color: #e44c2c; + } +} + +.mobile-main-menu { + display: none; +} +.mobile-main-menu.open { + background-color: #262626; + display: block; + height: 100%; + left: 0; + margin-left: auto; + margin-right: auto; + min-height: 100%; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 99999; +} + +.mobile-main-menu .container-fluid { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 68px; + position: relative; +} +.mobile-main-menu .container-fluid:before, .mobile-main-menu .container-fluid:after { + content: ""; + display: table; +} +.mobile-main-menu .container-fluid:after { + clear: both; +} +.mobile-main-menu .container-fluid { + *zoom: 1; +} + +.mobile-main-menu.open ul { + list-style-type: none; + padding: 0; +} +.mobile-main-menu.open ul li a { + font-size: 2rem; + color: #ffffff; + letter-spacing: 0; + line-height: 4rem; + text-decoration: none; +} +.mobile-main-menu.open ul li.active a { + color: #e44c2c; +} + +.main-menu-close-button { + background-image: url("../images/icon-close.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 24px 24px; + height: 24px; + position: absolute; + right: 0; + width: 24px; + top: -4px; +} + +.mobile-main-menu-header-container { + position: relative; +} + +.mobile-main-menu-links-container { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 2.8125rem; + height: 100%; + min-height: 100%; + margin-top: -68px; +} + +.site-footer { + padding: 2.5rem 0; + width: 100%; + background: #000000; + background-size: 100%; + margin-left: 0; + margin-right: 0; + position: relative; + z-index: 201; +} +@media screen and (min-width: 768px) { + .site-footer { + padding: 5rem 0; + } +} +.site-footer p { + color: #ffffff; +} +.site-footer ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 0; +} +.site-footer ul li { + font-size: 1.125rem; + line-height: 2rem; + color: #A0A0A1; + padding-bottom: 0.375rem; +} +.site-footer ul li.list-title { + padding-bottom: 0.75rem; + color: #ffffff; +} +.site-footer a:link, +.site-footer a:visited { + color: inherit; +} +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #e44c2c; + } +} + +.docs-tutorials-resources { + background-color: #262626; + color: #ffffff; + padding-top: 2.5rem; + padding-bottom: 2.5rem; + position: relative; + z-index: 201; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources { + padding-top: 5rem; + padding-bottom: 5rem; + } +} +.docs-tutorials-resources p { + color: #929292; + font-size: 1.125rem; +} +.docs-tutorials-resources h2 { + font-size: 1.5rem; + letter-spacing: -0.25px; + text-transform: none; + margin-bottom: 0.25rem; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources h2 { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources .col-md-4 { + margin-bottom: 2rem; + text-align: center; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources .col-md-4 { + 
margin-bottom: 0; + } +} +.docs-tutorials-resources .with-right-arrow { + margin-left: 12px; +} +.docs-tutorials-resources .with-right-arrow:hover { + background-image: url("../images/chevron-right-white.svg"); +} +.docs-tutorials-resources p { + font-size: 1rem; + line-height: 1.5rem; + letter-spacing: 0.22px; + color: #939393; + margin-bottom: 0; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources p { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources a { + font-size: 1.125rem; + color: #e44c2c; +} +.docs-tutorials-resources a:hover { + color: #ffffff; +} + +.footer-container { + position: relative; +} + +@media screen and (min-width: 768px) { + .footer-logo-wrapper { + position: absolute; + top: 0; + left: 30px; + } +} + +.footer-logo { + background-image: url("../images/logo-icon.svg"); + background-position: center; + background-repeat: no-repeat; + background-size: 20px 24px; + display: block; + height: 24px; + margin-bottom: 2.8125rem; + width: 20px; +} +@media screen and (min-width: 768px) { + .footer-logo { + background-size: 29px 36px; + height: 36px; + margin-bottom: 0; + margin-bottom: 0; + width: 29px; + } +} + +.footer-links-wrapper { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; +} +@media screen and (min-width: 768px) { + .footer-links-wrapper { + -ms-flex-wrap: initial; + flex-wrap: initial; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} + +.footer-links-col { + margin-bottom: 3.75rem; + width: 50%; +} +@media screen and (min-width: 768px) { + .footer-links-col { + margin-bottom: 0; + width: 14%; + margin-right: 23px; + } + .footer-links-col.follow-us-col { + width: 18%; + margin-right: 0; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .footer-links-col { + width: 18%; + margin-right: 30px; + } +} + +.footer-social-icons { + margin: 8.5625rem 0 2.5rem 0; +} +.footer-social-icons a { + height: 32px; + width: 32px; + display: inline-block; + background-color: #CCCDD1; + border-radius: 50%; + margin-right: 5px; +} +.footer-social-icons a.facebook { + background-image: url("../images/logo-facebook-dark.svg"); + background-position: center center; + background-size: 9px 18px; + background-repeat: no-repeat; +} +.footer-social-icons a.twitter { + background-image: url("../images/logo-twitter-dark.svg"); + background-position: center center; + background-size: 17px 17px; + background-repeat: no-repeat; +} +.footer-social-icons a.youtube { + background-image: url("../images/logo-youtube-dark.svg"); + background-position: center center; + background-repeat: no-repeat; +} + +.site-footer .mc-field-group { + margin-top: -2px; +} + +article.pytorch-article { + max-width: 920px; + margin: 0 auto; +} +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + margin: 1.375rem 0; + color: #262626; +} +article.pytorch-article h2 { + font-size: 1.625rem; + letter-spacing: 1.33px; + line-height: 2rem; + text-transform: none; +} +article.pytorch-article h3 { + font-size: 1.5rem; + letter-spacing: -0.25px; + line-height: 1.875rem; + text-transform: none; +} +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-size: 1.125rem; + letter-spacing: -0.19px; + line-height: 1.875rem; +} +article.pytorch-article p { + margin-bottom: 1.125rem; +} +article.pytorch-article p, +article.pytorch-article ul li, +article.pytorch-article ol li, 
+article.pytorch-article dl dt, +article.pytorch-article dl dd, +article.pytorch-article blockquote { + font-size: 1rem; + line-height: 1.375rem; + color: #262626; + letter-spacing: 0.01px; + font-weight: 500; +} +article.pytorch-article table { + margin-bottom: 2.5rem; + width: 100%; +} +article.pytorch-article table thead { + border-bottom: 1px solid #cacaca; +} +article.pytorch-article table th { + padding: 0.625rem; + color: #262626; +} +article.pytorch-article table td { + padding: 0.3125rem; +} +article.pytorch-article table tr th:first-of-type, +article.pytorch-article table tr td:first-of-type { + padding-left: 0; +} +article.pytorch-article table.docutils.field-list th.field-name { + padding: 0.3125rem; + padding-left: 0; +} +article.pytorch-article table.docutils.field-list td.field-body { + padding: 0.3125rem; +} +article.pytorch-article table.docutils.field-list td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article ul, +article.pytorch-article ol { + margin: 1.5rem 0 3.125rem 0; +} +@media screen and (min-width: 768px) { + article.pytorch-article ul, + article.pytorch-article ol { + padding-left: 6.25rem; + } +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin-bottom: 0.625rem; +} +article.pytorch-article dl { + margin-bottom: 1.5rem; +} +article.pytorch-article dl dt { + margin-bottom: 0.75rem; +} +article.pytorch-article pre { + margin-bottom: 2.5rem; +} +article.pytorch-article hr { + margin-top: 4.6875rem; + margin-bottom: 4.6875rem; +} +article.pytorch-article blockquote { + margin: 0 auto; + margin-bottom: 2.5rem; + width: 65%; +} +article.pytorch-article img { + width: 100%; +} + +html { + height: 100%; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +body { + background: #ffffff; + height: 100%; + margin: 0; +} +body.no-scroll { + height: 100%; + overflow: hidden; +} + +p { + margin-top: 0; + margin-bottom: 1.125rem; +} +p a:link, +p a:visited, +p a:hover { + color: #e44c2c; + text-decoration: none; +} +@media screen and (min-width: 768px) { + p a:hover { + text-decoration: underline; + } +} +p a:link, +p a:visited, +p a:hover { + color: #ee4c2c; +} + +.wy-breadcrumbs li a { + color: #ee4c2c; +} + +ul.pytorch-breadcrumbs { + padding-left: 0; + list-style-type: none; +} +ul.pytorch-breadcrumbs li { + display: inline-block; + font-size: 0.875rem; +} +ul.pytorch-breadcrumbs a { + color: #ee4c2c; + text-decoration: none; +} + +.table-of-contents-link-wrapper { + display: block; + margin-top: 0; + padding: 1.25rem 1.875rem; + background-color: #f3f4f7; + position: relative; + color: #262626; + font-size: 1.25rem; +} +.table-of-contents-link-wrapper.is-open .toggle-table-of-contents { + -webkit-transform: rotate(180deg); + transform: rotate(180deg); +} +@media screen and (min-width: 1100px) { + .table-of-contents-link-wrapper { + display: none; + } +} + +.toggle-table-of-contents { + background-image: url("../images/chevron-down-grey.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 18px 18px; + height: 100%; + position: absolute; + right: 21px; + width: 30px; + top: 0; +} + +.tutorials-header .header-logo { + background-image: url("../images/logo-dark.svg"); +} +.tutorials-header .main-menu ul li a { + color: #262626; +} +.tutorials-header .main-menu-open-button { + background-image: url("../images/icon-menu-dots-dark.svg"); +} + +.rst-content footer .helpful-hr.hr-top { + margin-bottom: -0.0625rem; +} +.rst-content footer .helpful-hr.hr-bottom { + 
margin-top: -0.0625rem; +} +.rst-content footer .helpful-container { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + font-size: 1.125rem; +} +.rst-content footer .helpful-container .helpful-question, .rst-content footer .helpful-container .was-helpful-thank-you { + padding: 0.625rem 1.25rem 0.625rem 1.25rem; +} +.rst-content footer .helpful-container .was-helpful-thank-you { + display: none; +} +.rst-content footer .helpful-container .helpful-question.yes-link, .rst-content footer .helpful-container .helpful-question.no-link { + color: #e44c2c; + cursor: pointer; +} +.rst-content footer .helpful-container .helpful-question.yes-link:hover, .rst-content footer .helpful-container .helpful-question.no-link:hover { + background-color: #e44c2c; + color: #ffffff; +} +.rst-content footer div[role="contentinfo"] { + padding-top: 2.5rem; +} +.rst-content footer div[role="contentinfo"] p { + margin-bottom: 0; +} + +h1 { + font-size: 2rem; + letter-spacing: 1.78px; + line-height: 2.5rem; + text-transform: uppercase; + margin: 1.375rem 0; +} + +span.pre { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 6px; +} + +pre { + background-color: #f3f4f7; + padding: 1.375rem; +} + +.highlight .c1 { + color: #6c6c6d; +} + +.headerlink { + display: none !important; +} + +a:link.has-code, +a:hover.has-code, +a:visited.has-code { + color: #4974D1; +} +a:link.has-code span, +a:hover.has-code span, +a:visited.has-code span { + color: #4974D1; +} + +article.pytorch-article ul, +article.pytorch-article ol { + padding-left: 1.875rem; + margin: 0; +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin: 0; + line-height: 1.75rem; +} +article.pytorch-article ul p, +article.pytorch-article ol p { + line-height: 1.75rem; + margin-bottom: 0; +} +article.pytorch-article ul ul, +article.pytorch-article ul ol, +article.pytorch-article ol ul, +article.pytorch-article ol ol { + margin: 0; +} +article.pytorch-article h1, +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-weight: normal; +} +article.pytorch-article h1 a, +article.pytorch-article h2 a, +article.pytorch-article h3 a, +article.pytorch-article h4 a, +article.pytorch-article h5 a, +article.pytorch-article h6 a { + color: #262626; +} +article.pytorch-article p.caption { + margin-top: 1.25rem; +} + +article.pytorch-article .section:first-of-type h1:first-of-type { + margin-top: 0; +} + +article.pytorch-article .sphx-glr-thumbcontainer { + margin: 0; + border: 1px solid #d6d7d8; + border-radius: 0; + width: 45%; + text-align: center; + margin-bottom: 5%; +} +@media screen and (max-width: 1100px) { + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(odd) { + margin-left: 0; + margin-right: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(even) { + margin-right: 0; + margin-left: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 40%; + } +} +@media screen and (min-width: 1101px) { + article.pytorch-article .sphx-glr-thumbcontainer { + margin-right: 3%; + margin-bottom: 3%; + width: 30%; + } +} +article.pytorch-article .sphx-glr-thumbcontainer .caption-text a { + font-size: 1rem; + color: #262626; + letter-spacing: 0; + line-height: 1.5rem; + text-decoration: none; +} +article.pytorch-article .sphx-glr-thumbcontainer:hover { + -webkit-box-shadow: none; + box-shadow: none; + border-bottom-color: #ffffff; +} +article.pytorch-article 
.sphx-glr-thumbcontainer:hover .figure:before { + bottom: 100%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 80%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure:before { + content: ""; + display: block; + position: absolute; + top: 0; + bottom: 35%; + left: 0; + right: 0; + background: #8A94B3; + opacity: 0.10; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure a.reference.internal { + text-align: left; +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .sphx-glr-thumbcontainer:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + background-color: #ee4c2c; + } +} + +article.pytorch-article .section :not(dt) > code { + color: #262626; + border-top: solid 2px #f3f4f7; + background-color: #f3f4f7; + border-bottom: solid 2px #f3f4f7; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .section :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .function dt, article.pytorch-article .attribute dt, article.pytorch-article .class .attribute dt, article.pytorch-article .class dt { + position: relative; + background: #f3f4f7; + padding: 0.5rem; + border-left: 3px solid #ee4c2c; + word-wrap: break-word; + padding-right: 100px; +} +article.pytorch-article .function dt em.property, article.pytorch-article .attribute dt em.property, article.pytorch-article .class dt em.property { + font-family: inherit; +} +article.pytorch-article .function dt em, article.pytorch-article .attribute dt em, article.pytorch-article .class .attribute dt em, article.pytorch-article .class dt em, article.pytorch-article .function dt .sig-paren, article.pytorch-article .attribute dt .sig-paren, article.pytorch-article .class dt .sig-paren { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; + font-size: 87.5%; +} +article.pytorch-article .function dt a, article.pytorch-article .attribute dt a, article.pytorch-article .class .attribute dt a, article.pytorch-article .class dt a { + position: absolute; + right: 30px; + padding-right: 0; + top: 50%; + -webkit-transform: perspective(1px) translateY(-50%); + transform: perspective(1px) translateY(-50%); +} +article.pytorch-article .function dt:hover .viewcode-link, article.pytorch-article .attribute dt:hover .viewcode-link, article.pytorch-article .class dt:hover .viewcode-link { + color: #ee4c2c; +} +article.pytorch-article .function .anchorjs-link, article.pytorch-article .attribute .anchorjs-link, article.pytorch-article .class .anchorjs-link { + display: inline; + position: absolute; + right: 8px; + font-size: 1.5625rem !important; + padding-left: 0; +} +article.pytorch-article .function dt > code, article.pytorch-article .attribute dt > code, article.pytorch-article .class .attribute dt > code, article.pytorch-article .class dt > code { + color: #262626; + border-top: solid 2px #f3f4f7; + background-color: #f3f4f7; + border-bottom: solid 2px #f3f4f7; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .function .viewcode-link, article.pytorch-article 
.attribute .viewcode-link, article.pytorch-article .class .viewcode-link { + font-size: 0.875rem; + color: #979797; + letter-spacing: 0; + line-height: 1.5rem; + text-transform: uppercase; +} +article.pytorch-article .function dd, article.pytorch-article .attribute dd, article.pytorch-article .class .attribute dd, article.pytorch-article .class dd { + padding-left: 3.75rem; +} +article.pytorch-article .function dd p, article.pytorch-article .attribute dd p, article.pytorch-article .class .attribute dd p, article.pytorch-article .class dd p { + color: #262626; +} +article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + white-space: nowrap; + color: #262626; + width: 20%; +} +@media screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + width: 15%; + } +} +article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding: 0.625rem; + width: 80%; + color: #262626; +} +@media screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + width: 85%; + } +} +@media screen and (min-width: 1600px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding-left: 1.25rem; + } +} +article.pytorch-article .function table tbody tr td.field-body p, article.pytorch-article .attribute table tbody tr td.field-body p, article.pytorch-article .class table tbody tr td.field-body p { + padding-left: 0px; +} +article.pytorch-article .function table tbody tr td.field-body p:last-of-type, article.pytorch-article .attribute table tbody tr td.field-body p:last-of-type, article.pytorch-article .class table tbody tr td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article .function table tbody tr td.field-body ol, article.pytorch-article .attribute table tbody tr td.field-body ol, article.pytorch-article .class table tbody tr td.field-body ol, article.pytorch-article .function table tbody tr td.field-body ul, article.pytorch-article .attribute table tbody tr td.field-body ul, article.pytorch-article .class table tbody tr td.field-body ul { + padding-left: 1rem; + padding-bottom: 0; +} +article.pytorch-article .function table.docutils.field-list, article.pytorch-article .attribute table.docutils.field-list, article.pytorch-article .class table.docutils.field-list { + margin-bottom: 0.75rem; +} +article.pytorch-article .attribute .has-code { + float: none; +} +article.pytorch-article .class dt { + border-left: none; + border-top: 3px solid #ee4c2c; + padding-left: 4em; +} +article.pytorch-article .class dt em.property { + position: absolute; + left: 0.5rem; +} +article.pytorch-article .class dd .docutils dt { + padding-left: 0.5rem; +} +article.pytorch-article .class em.property { + text-transform: uppercase; + font-style: normal; + color: #ee4c2c; + font-size: 1rem; + letter-spacing: 0; + padding-right: 0.75rem; +} +article.pytorch-article .class dl dt 
em.property { + position: static; + left: 0; + padding-right: 0; +} +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + border-left: 3px solid #ee4c2c; + border-top: none; +} +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + padding-left: 0.5rem; +} +article.pytorch-article .class .attribute dt { + border-top: none; +} +article.pytorch-article .class .attribute dt em.property { + position: relative; + left: 0; +} +article.pytorch-article table { + table-layout: fixed; +} + +article.pytorch-article .note, +article.pytorch-article .warning, +article.pytorch-article .tip, +article.pytorch-article .hint, +article.pytorch-article .important, +article.pytorch-article .caution, +article.pytorch-article .danger, +article.pytorch-article .attention, +article.pytorch-article .error { + background: #f3f4f7; + margin-top: 1.875rem; + margin-bottom: 1.125rem; +} +article.pytorch-article .note .admonition-title, +article.pytorch-article .warning .admonition-title, +article.pytorch-article .tip .admonition-title, +article.pytorch-article .hint .admonition-title, +article.pytorch-article .important .admonition-title, +article.pytorch-article .caution .admonition-title, +article.pytorch-article .danger .admonition-title, +article.pytorch-article .attention .admonition-title, +article.pytorch-article .error .admonition-title { + color: #ffffff; + letter-spacing: 1px; + text-transform: uppercase; + margin-bottom: 1.125rem; + padding: 3px 0 3px 1.375rem; + position: relative; + font-size: 0.875rem; +} +article.pytorch-article .note .admonition-title:before, +article.pytorch-article .warning .admonition-title:before, +article.pytorch-article .tip .admonition-title:before, +article.pytorch-article .hint .admonition-title:before, +article.pytorch-article .important .admonition-title:before, +article.pytorch-article .caution .admonition-title:before, +article.pytorch-article .danger .admonition-title:before, +article.pytorch-article .attention .admonition-title:before, +article.pytorch-article .error .admonition-title:before { + content: "\2022"; + position: absolute; + left: 9px; + color: #ffffff; + top: 2px; +} +article.pytorch-article .note p:nth-child(n + 2), +article.pytorch-article .warning p:nth-child(n + 2), +article.pytorch-article .tip p:nth-child(n + 2), +article.pytorch-article .hint p:nth-child(n + 2), +article.pytorch-article .important p:nth-child(n + 2), +article.pytorch-article .caution p:nth-child(n + 2), +article.pytorch-article .danger p:nth-child(n + 2), +article.pytorch-article .attention p:nth-child(n + 2), +article.pytorch-article .error p:nth-child(n + 2) { + padding: 0 1.375rem; +} +article.pytorch-article .note table, +article.pytorch-article .warning table, +article.pytorch-article .tip table, +article.pytorch-article .hint table, +article.pytorch-article .important table, +article.pytorch-article .caution table, +article.pytorch-article .danger table, +article.pytorch-article .attention table, +article.pytorch-article .error table { + margin: 0 2rem; + width: auto; +} +article.pytorch-article .note .pre, +article.pytorch-article .note pre, +article.pytorch-article .warning .pre, +article.pytorch-article .warning pre, +article.pytorch-article .tip .pre, +article.pytorch-article .tip pre, +article.pytorch-article .hint .pre, +article.pytorch-article .hint pre, +article.pytorch-article .important .pre, +article.pytorch-article .important pre, +article.pytorch-article .caution .pre, 
+article.pytorch-article .caution pre, +article.pytorch-article .danger .pre, +article.pytorch-article .danger pre, +article.pytorch-article .attention .pre, +article.pytorch-article .attention pre, +article.pytorch-article .error .pre, +article.pytorch-article .error pre { + background: #ffffff; + outline: 1px solid #e9e9e9; +} +article.pytorch-article .note :not(dt) > code, +article.pytorch-article .warning :not(dt) > code, +article.pytorch-article .tip :not(dt) > code, +article.pytorch-article .hint :not(dt) > code, +article.pytorch-article .important :not(dt) > code, +article.pytorch-article .caution :not(dt) > code, +article.pytorch-article .danger :not(dt) > code, +article.pytorch-article .attention :not(dt) > code, +article.pytorch-article .error :not(dt) > code { + border-top: solid 2px #ffffff; + background-color: #ffffff; + border-bottom: solid 2px #ffffff; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; + outline: 1px solid #e9e9e9; +} +article.pytorch-article .note :not(dt) > code .pre, +article.pytorch-article .warning :not(dt) > code .pre, +article.pytorch-article .tip :not(dt) > code .pre, +article.pytorch-article .hint :not(dt) > code .pre, +article.pytorch-article .important :not(dt) > code .pre, +article.pytorch-article .caution :not(dt) > code .pre, +article.pytorch-article .danger :not(dt) > code .pre, +article.pytorch-article .attention :not(dt) > code .pre, +article.pytorch-article .error :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .note pre, +article.pytorch-article .warning pre, +article.pytorch-article .tip pre, +article.pytorch-article .hint pre, +article.pytorch-article .important pre, +article.pytorch-article .caution pre, +article.pytorch-article .danger pre, +article.pytorch-article .attention pre, +article.pytorch-article .error pre { + margin-bottom: 0; +} +article.pytorch-article .note .highlight, +article.pytorch-article .warning .highlight, +article.pytorch-article .tip .highlight, +article.pytorch-article .hint .highlight, +article.pytorch-article .important .highlight, +article.pytorch-article .caution .highlight, +article.pytorch-article .danger .highlight, +article.pytorch-article .attention .highlight, +article.pytorch-article .error .highlight { + margin: 0 2rem 1.125rem 2rem; +} +article.pytorch-article .note ul, +article.pytorch-article .note ol, +article.pytorch-article .warning ul, +article.pytorch-article .warning ol, +article.pytorch-article .tip ul, +article.pytorch-article .tip ol, +article.pytorch-article .hint ul, +article.pytorch-article .hint ol, +article.pytorch-article .important ul, +article.pytorch-article .important ol, +article.pytorch-article .caution ul, +article.pytorch-article .caution ol, +article.pytorch-article .danger ul, +article.pytorch-article .danger ol, +article.pytorch-article .attention ul, +article.pytorch-article .attention ol, +article.pytorch-article .error ul, +article.pytorch-article .error ol { + padding-left: 3.25rem; +} +article.pytorch-article .note ul li, +article.pytorch-article .note ol li, +article.pytorch-article .warning ul li, +article.pytorch-article .warning ol li, +article.pytorch-article .tip ul li, +article.pytorch-article .tip ol li, +article.pytorch-article .hint ul li, +article.pytorch-article .hint ol li, +article.pytorch-article .important ul li, +article.pytorch-article .important ol li, +article.pytorch-article .caution ul li, +article.pytorch-article .caution ol li, +article.pytorch-article .danger ul li, 
+article.pytorch-article .danger ol li, +article.pytorch-article .attention ul li, +article.pytorch-article .attention ol li, +article.pytorch-article .error ul li, +article.pytorch-article .error ol li { + color: #262626; +} +article.pytorch-article .note p, +article.pytorch-article .warning p, +article.pytorch-article .tip p, +article.pytorch-article .hint p, +article.pytorch-article .important p, +article.pytorch-article .caution p, +article.pytorch-article .danger p, +article.pytorch-article .attention p, +article.pytorch-article .error p { + margin-top: 1.125rem; +} +article.pytorch-article .note .admonition-title { + background: #54c7ec; +} +article.pytorch-article .warning .admonition-title { + background: #e94f3b; +} +article.pytorch-article .tip .admonition-title { + background: #6bcebb; +} +article.pytorch-article .hint .admonition-title { + background: #a2cdde; +} +article.pytorch-article .important .admonition-title { + background: #5890ff; +} +article.pytorch-article .caution .admonition-title { + background: #f7923a; +} +article.pytorch-article .danger .admonition-title { + background: #db2c49; +} +article.pytorch-article .attention .admonition-title { + background: #f5a623; +} +article.pytorch-article .error .admonition-title { + background: #cc2f90; +} +article.pytorch-article .sphx-glr-download-link-note.admonition.note, +article.pytorch-article .reference.download.internal, article.pytorch-article .sphx-glr-signature { + display: none; +} +article.pytorch-article .admonition > p:last-of-type { + margin-bottom: 0; + padding-bottom: 1.125rem !important; +} + +.pytorch-article div.sphx-glr-download a { + background-color: #f3f4f7; + background-image: url("../images/arrow-down-orange.svg"); + background-repeat: no-repeat; + background-position: left 10px center; + background-size: 15px 15px; + border-radius: 0; + border: none; + display: block; + text-align: left; + padding: 0.9375rem 3.125rem; + position: relative; + margin: 1.25rem auto; +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + .pytorch-article div.sphx-glr-download a:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + background-color: #ee4c2c; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a { + background-position: left 20px center; + } +} +.pytorch-article div.sphx-glr-download a:hover { + -webkit-box-shadow: none; + box-shadow: none; + text-decoration: none; + background-image: url("../images/arrow-down-orange.svg"); + background-color: #f3f4f7; +} +.pytorch-article div.sphx-glr-download a span.pre { + background-color: transparent; + font-size: 1.125rem; + padding: 0; + color: #262626; +} +.pytorch-article div.sphx-glr-download a code, .pytorch-article div.sphx-glr-download a kbd, .pytorch-article div.sphx-glr-download a pre, .pytorch-article div.sphx-glr-download a samp, .pytorch-article div.sphx-glr-download a span.pre { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +.pytorch-article p.sphx-glr-script-out { + margin-bottom: 1.125rem; +} + +.pytorch-article div.sphx-glr-script-out { + margin-bottom: 2.5rem; +} +.pytorch-article div.sphx-glr-script-out .highlight { + margin-left: 0; + 
margin-top: 0; +} +.pytorch-article div.sphx-glr-script-out .highlight pre { + background-color: #fdede9; + padding: 1.5625rem; + color: #837b79; +} +.pytorch-article div.sphx-glr-script-out + p { + margin-top: unset; +} + +article.pytorch-article .wy-table-responsive table { + border: none; + border-color: #ffffff !important; + table-layout: fixed; +} +article.pytorch-article .wy-table-responsive table thead tr { + border-bottom: 2px solid #6c6c6d; +} +article.pytorch-article .wy-table-responsive table thead th { + line-height: 1.75rem; + padding-left: 0.9375rem; + padding-right: 0.9375rem; +} +article.pytorch-article .wy-table-responsive table tbody .row-odd { + background-color: #f3f4f7; +} +article.pytorch-article .wy-table-responsive table tbody td { + color: #6c6c6d; + white-space: normal; + padding: 0.9375rem; + font-size: 1rem; + line-height: 1.375rem; +} +article.pytorch-article .wy-table-responsive table tbody td .pre { + background: #ffffff; + outline: 1px solid #e9e9e9; + color: #ee4c2c; + font-size: 87.5%; +} +article.pytorch-article .wy-table-responsive table tbody td code { + font-size: 87.5%; +} + +a[rel~="prev"], a[rel~="next"] { + padding: 0.375rem 0 0 0; +} + +img.next-page, +img.previous-page { + width: 8px; + height: 10px; + position: relative; + top: -1px; +} + +img.previous-page { + -webkit-transform: scaleX(-1); + transform: scaleX(-1); +} + +.rst-footer-buttons { + margin-top: 1.875rem; + margin-bottom: 1.875rem; +} +.rst-footer-buttons .btn:focus, +.rst-footer-buttons .btn.focus { + -webkit-box-shadow: none; + box-shadow: none; +} + +article.pytorch-article blockquote { + margin-left: 3.75rem; + color: #6c6c6d; +} + +article.pytorch-article .caption { + color: #6c6c6d; + letter-spacing: 0.25px; + line-height: 2.125rem; +} + +article.pytorch-article .math { + color: #262626; + width: auto; + text-align: center; +} +article.pytorch-article .math img { + width: auto; +} + +.pytorch-breadcrumbs-wrapper { + width: 100%; +} +@media screen and (min-width: 1101px) { + .pytorch-breadcrumbs-wrapper { + float: left; + margin-left: 3%; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-breadcrumbs-wrapper { + width: 850px; + margin-left: 1.875rem; + } +} +.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside { + float: right; +} + +.pytorch-article .container { + padding-left: 0; + padding-right: 0; + max-width: none; +} + +a:link, +a:visited, +a:hover { + color: #ee4c2c; +} + +::-webkit-input-placeholder { + color: #ee4c2c; +} + +::-moz-placeholder { + color: #ee4c2c; +} + +:-ms-input-placeholder { + color: #ee4c2c; +} + +:-moz-placeholder { + color: #ee4c2c; +} + +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #ee4c2c; + } +} + +.docs-tutorials-resources a { + color: #ee4c2c; +} + +.header-holder { + position: relative; + z-index: 201; +} + +.header-holder .main-menu ul li.active:after { + color: #ee4c2c; +} +.header-holder .main-menu ul li.active a { + color: #ee4c2c; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu ul li a:hover { + color: #ee4c2c; + } +} + +.mobile-main-menu.open ul li.active a { + color: #ee4c2c; +} + +.version { + padding-bottom: 1rem; +} + +.pytorch-call-to-action-links { + padding-top: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 0; + } +} +@media 
(min-width: 1100px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +.pytorch-call-to-action-links #tutorial-type { + display: none; +} +.pytorch-call-to-action-links .call-to-action-img, .pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1.375rem; + width: 1.375rem; + margin-right: 10px; +} +.pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1rem; +} +.pytorch-call-to-action-links a { + padding-right: 1.25rem; + color: #000000; + cursor: pointer; +} +.pytorch-call-to-action-links a:hover { + color: #e44c2c; +} +.pytorch-call-to-action-links a .call-to-action-desktop-view { + display: none; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-desktop-view { + display: block; + } +} +.pytorch-call-to-action-links a .call-to-action-mobile-view { + display: block; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-mobile-view { + display: none; + } +} +.pytorch-call-to-action-links a #google-colab-link, .pytorch-call-to-action-links a #download-notebook-link, +.pytorch-call-to-action-links a #github-view-link { + padding-bottom: 0.625rem; + border-bottom: 1px solid #f3f4f7; + padding-right: 2.5rem; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.pytorch-call-to-action-links a #google-colab-link:hover, .pytorch-call-to-action-links a #download-notebook-link:hover, +.pytorch-call-to-action-links a #github-view-link:hover { + border-bottom-color: #e44c2c; + color: #e44c2c; +} + +#tutorial-cards-container #tutorial-cards { + width: 100%; +} +#tutorial-cards-container .tutorials-nav { + padding-left: 0; + padding-right: 0; + padding-bottom: 0; +} +#tutorial-cards-container .tutorials-hr { + margin-top: 1rem; + margin-bottom: 1rem; +} +#tutorial-cards-container .card.tutorials-card { + border-radius: 0; + border-color: #f3f4f7; + height: 98px; + margin-bottom: 1.25rem; + margin-bottom: 1.875rem; + overflow: scroll; + background-color: #f3f4f7; + cursor: pointer; +} +@media screen and (min-width: 1240px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: scroll; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image { + position: absolute; + top: 0px; + right: 0px; + height: 96px; + width: 96px; + opacity: 0.5; +} +#tutorial-cards-container .card.tutorials-card .tutorials-image img { + height: 100%; + width: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 25%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 198px; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image:before { + content: ''; + position: absolute; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 1; + background: #000000; + opacity: .075; +} +#tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 75%; + } +} +@media 
(min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card .card-title-container h4 { + margin-bottom: 1.125rem; + margin-top: 0; + font-size: 1.5rem; +} +#tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + font-size: 0.9375rem; + line-height: 1.5rem; + margin-bottom: 0; + color: #6c6c6d; + font-weight: 400; + width: 70%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 75%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card p.tags { + margin-top: 30px; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; +} +#tutorial-cards-container .card.tutorials-card h4 { + color: #262626; + margin-bottom: 1.125rem; +} +#tutorial-cards-container .card.tutorials-card a { + height: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 190px; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 234px; + } +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + #tutorial-cards-container .card.tutorials-card:hover:after { + width: 100%; + } +} +#tutorial-cards-container .card.tutorials-card:hover { + background-color: #ffffff; + border: 1px solid #e2e2e2; + border-bottom: none; +} +#tutorial-cards-container .card.tutorials-card:hover p.card-summary { + color: #262626; +} +#tutorial-cards-container .card.tutorials-card:hover .tutorials-image { + opacity: unset; +} +#tutorial-cards-container .tutorial-tags-container { + width: 75%; +} +#tutorial-cards-container .tutorial-tags-container.active { + width: 0; +} +#tutorial-cards-container .tutorial-filter-menu ul { + list-style-type: none; + padding-left: 1.25rem; +} +#tutorial-cards-container .tutorial-filter-menu ul li { + padding-right: 1.25rem; + word-break: break-all; +} +#tutorial-cards-container .tutorial-filter-menu ul li a { + color: #979797; +} +#tutorial-cards-container .tutorial-filter-menu ul li a:hover { + color: #e44c2c; +} +#tutorial-cards-container .tutorial-filter { + cursor: pointer; +} +#tutorial-cards-container .filter-btn { + color: #979797; + border: 1px solid #979797; + display: inline-block; + text-align: center; + white-space: nowrap; + vertical-align: middle; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + margin-bottom: 5px; +} +#tutorial-cards-container .filter-btn:hover { + border: 1px solid #e44c2c; + color: #e44c2c; +} +#tutorial-cards-container .filter-btn.selected { + background-color: #e44c2c; + border: 1px solid #e44c2c; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected { + background-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected:hover { + border-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .pagination .page 
{ + border: 1px solid #dee2e6; + padding: 0.5rem 0.75rem; +} +#tutorial-cards-container .pagination .active .page { + background-color: #dee2e6; +} + +article.pytorch-article .tutorials-callout-container { + padding-bottom: 50px; +} +article.pytorch-article .tutorials-callout-container .col-md-6 { + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container { + padding: 10px 0px 30px 0px; + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container .body-paragraph { + color: #666666; + font-weight: 300; + font-size: 1.125rem; + line-height: 1.875rem; +} +article.pytorch-article .tutorials-callout-container .btn.callout-button { + font-size: 1.125rem; + border-radius: 0; + border: none; + background-color: #f3f4f7; + color: #6c6c6d; + font-weight: 400; + position: relative; + letter-spacing: 0.25px; +} +@media screen and (min-width: 768px) { + article.pytorch-article .tutorials-callout-container .btn.callout-button:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .tutorials-callout-container .btn.callout-button:hover:after { + width: 100%; + } +} +article.pytorch-article .tutorials-callout-container .btn.callout-button a { + color: inherit; +} + +.pytorch-container { + margin: 0 auto; + padding: 0 1.875rem; + width: auto; + position: relative; +} +@media screen and (min-width: 1100px) { + .pytorch-container { + padding: 0; + } +} +@media screen and (min-width: 1101px) { + .pytorch-container { + margin-left: 25%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-container { + margin-left: 350px; + } +} +.pytorch-container:before, .pytorch-container:after { + content: ""; + display: table; +} +.pytorch-container:after { + clear: both; +} +.pytorch-container { + *zoom: 1; +} + +.pytorch-content-wrap { + background-color: #ffffff; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + position: relative; + padding-top: 0; +} +.pytorch-content-wrap:before, .pytorch-content-wrap:after { + content: ""; + display: table; +} +.pytorch-content-wrap:after { + clear: both; +} +.pytorch-content-wrap { + *zoom: 1; +} +@media screen and (min-width: 1101px) { + .pytorch-content-wrap { + padding-top: 45px; + float: left; + width: 100%; + display: block; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-wrap { + width: 100%; + } +} + +.pytorch-content { + background: #ffffff; + width: 100%; + max-width: 700px; + position: relative; +} + +.pytorch-content-left { + margin-top: 2.5rem; + width: 100%; +} +@media screen and (min-width: 1101px) { + .pytorch-content-left { + margin-top: 0; + margin-left: 3%; + width: 75%; + float: left; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-left { + width: 850px; + margin-left: 30px; + } +} +.pytorch-content-left .main-content { + padding-top: 0.9375rem; +} +.pytorch-content-left .main-content ul.simple { + padding-bottom: 1.25rem; +} +.pytorch-content-left .main-content .note:nth-child(1), .pytorch-content-left .main-content .warning:nth-child(1) { + margin-top: 0; +} + +.pytorch-content-right { + display: none; + position: relative; + overflow-x: hidden; + overflow-y: hidden; +} +@media screen and (min-width: 1101px) { + .pytorch-content-right { + display: block; + margin-left: 0; + width: 19%; + float: left; + height: 100%; + } +} 
+@media screen and (min-width: 1600px) { + .pytorch-content-right { + width: 280px; + } +} + +@media screen and (min-width: 1101px) { + .pytorch-side-scroll { + position: relative; + overflow-x: hidden; + overflow-y: scroll; + height: 100%; + } +} + +.pytorch-menu-vertical { + padding: 1.25rem 1.875rem 2.5rem 1.875rem; +} +@media screen and (min-width: 1101px) { + .pytorch-menu-vertical { + display: block; + padding-top: 0; + padding-right: 13.5%; + padding-bottom: 5.625rem; + } +} +@media screen and (min-width: 1600px) { + .pytorch-menu-vertical { + padding-left: 0; + padding-right: 1.5625rem; + } +} + +.pytorch-left-menu { + display: none; + background-color: #f3f4f7; + color: #262626; + overflow: scroll; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu { + display: block; + overflow-x: hidden; + overflow-y: hidden; + padding-bottom: 110px; + padding: 0 1.875rem 0 0; + width: 25%; + z-index: 200; + float: left; + } + .pytorch-left-menu.make-fixed { + position: fixed; + top: 0; + bottom: 0; + left: 0; + float: none; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu { + padding: 0 0 0 1.875rem; + width: 350px; + } +} + +.expand-menu, .hide-menu { + color: #6c6c6d; + padding-left: 10px; + cursor: pointer; +} + +.hide-menu { + display: none; +} + +.left-nav-top-caption { + padding-top: 1rem; +} + +.pytorch-left-menu p.caption { + color: #262626; + display: block; + font-size: 1rem; + line-height: 1.375rem; + margin-bottom: 1rem; + text-transform: none; + white-space: nowrap; +} + +.pytorch-left-menu-search { + margin-bottom: 2.5rem; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu-search { + margin: 1.25rem 0.625rem 1.875rem 0; + } +} + +.pytorch-left-menu-search ::-webkit-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search :-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::placeholder { + color: #262626; +} + +.pytorch-left-menu-search input[type=text] { + border-radius: 0; + padding: 0.5rem 0.75rem; + border-color: #ffffff; + color: #262626; + border-style: solid; + font-size: 1rem; + width: 100%; + background-color: #f3f4f7; + background-image: url("../images/search-icon.svg"); + background-repeat: no-repeat; + background-size: 18px 18px; + background-position: 12px 10px; + padding-left: 40px; + background-color: #ffffff; +} +.pytorch-left-menu-search input[type=text]:focus { + outline: 0; +} + +@media screen and (min-width: 1101px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 120%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 340px; + } +} + +.pytorch-right-menu { + min-height: 100px; + overflow-x: hidden; + overflow-y: hidden; + left: 0; + z-index: 200; + padding-top: 0; + position: relative; +} +@media screen and (min-width: 1101px) { + .pytorch-right-menu { + width: 100%; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 83.5%; + width: 14%; + } + .pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu { + left: 0; + width: 380px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1230px; + } + .pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} + +.pytorch-left-menu ul, +.pytorch-right-menu ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 2.5rem; +} 
+.pytorch-left-menu > ul, +.pytorch-right-menu > ul { + margin-bottom: 2.5rem; +} +.pytorch-left-menu a:link, +.pytorch-left-menu a:visited, +.pytorch-left-menu a:hover, +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + color: #6c6c6d; + font-size: 0.875rem; + line-height: 1rem; + padding: 0; + text-decoration: none; +} +.pytorch-left-menu a:link.reference.internal, +.pytorch-left-menu a:visited.reference.internal, +.pytorch-left-menu a:hover.reference.internal, +.pytorch-right-menu a:link.reference.internal, +.pytorch-right-menu a:visited.reference.internal, +.pytorch-right-menu a:hover.reference.internal { + margin-bottom: 0.3125rem; + position: relative; +} +.pytorch-left-menu li code, +.pytorch-right-menu li code { + border: none; + background: inherit; + color: inherit; + padding-left: 0; + padding-right: 0; +} +.pytorch-left-menu li span.toctree-expand, +.pytorch-right-menu li span.toctree-expand { + display: block; + float: left; + margin-left: -1.2em; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.on a, .pytorch-left-menu li.current > a, +.pytorch-right-menu li.on a, +.pytorch-right-menu li.current > a { + position: relative; + border: none; +} +.pytorch-left-menu li.on a span.toctree-expand, .pytorch-left-menu li.current > a span.toctree-expand, +.pytorch-right-menu li.on a span.toctree-expand, +.pytorch-right-menu li.current > a span.toctree-expand { + display: block; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.toctree-l1.current > a, +.pytorch-right-menu li.toctree-l1.current > a { + color: #ee4c2c; +} +.pytorch-left-menu li.toctree-l1.current > a:before, +.pytorch-right-menu li.toctree-l1.current > a:before { + content: "\2022"; + display: inline-block; + position: absolute; + left: -15px; + top: 1px; + font-size: 1.375rem; + color: #ee4c2c; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu li.toctree-l1.current > a:before, + .pytorch-right-menu li.toctree-l1.current > a:before { + left: -20px; + } +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2 > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3 > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2 > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > ul { + display: none; +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2.current > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3.current > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2.current > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3.current > ul { + display: block; +} +.pytorch-left-menu li.toctree-l2.current li.toctree-l3 > a, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > a { + display: block; +} +.pytorch-left-menu li.toctree-l3, +.pytorch-right-menu li.toctree-l3 { + font-size: 0.9em; +} +.pytorch-left-menu li.toctree-l3.current li.toctree-l4 > a, +.pytorch-right-menu li.toctree-l3.current li.toctree-l4 > a { + display: block; +} +.pytorch-left-menu li.toctree-l4, +.pytorch-right-menu li.toctree-l4 { + font-size: 0.9em; +} +.pytorch-left-menu li.current ul, +.pytorch-right-menu li.current ul { + display: block; +} +.pytorch-left-menu li ul, +.pytorch-right-menu li ul { + margin-bottom: 0; + display: none; +} +.pytorch-left-menu li ul li a, +.pytorch-right-menu li ul li a { + margin-bottom: 0; +} +.pytorch-left-menu a, +.pytorch-right-menu a { + display: inline-block; + position: relative; +} +.pytorch-left-menu a:hover, +.pytorch-right-menu a:hover { + cursor: 
pointer; +} +.pytorch-left-menu a:active, +.pytorch-right-menu a:active { + cursor: pointer; +} + +.pytorch-left-menu ul { + padding-left: 0; +} + +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + color: #6c6c6d; +} +.pytorch-right-menu a:link span.pre, +.pytorch-right-menu a:visited span.pre, +.pytorch-right-menu a:hover span.pre { + color: #6c6c6d; +} +.pytorch-right-menu a.reference.internal.expanded:before { + content: "-"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu a.reference.internal.not-expanded:before { + content: "+"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu li.active > a { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a span.pre, .pytorch-right-menu li.active > a:before { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a:after { + content: "\2022"; + color: #e44c2c; + display: inline-block; + font-size: 1.375rem; + left: -17px; + position: absolute; + top: 1px; +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > ul > li { + margin-bottom: 0; +} +.pytorch-right-menu ul ul { + padding-left: 0; +} +.pytorch-right-menu ul ul li { + padding-left: 0px; +} +.pytorch-right-menu ul ul li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu ul ul li ul { + display: none; + padding-left: 10px; +} +.pytorch-right-menu ul ul li li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu li ul { + display: block; +} + +.pytorch-right-menu .pytorch-side-scroll { + padding-top: 20px; +} +@media screen and (min-width: 1101px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 120%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 400px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 10%; + padding-right: 10%; + margin-bottom: 0; +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 25px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > a.reference.internal { + color: #262626; + font-weight: 500; +} +.pytorch-right-menu .pytorch-side-scroll ul li { + position: relative; +} + +.header-container { + max-width: none; + margin-top: 4px; +} +@media screen and (min-width: 1101px) { + .header-container { + margin-top: 0; + } +} +@media screen and (min-width: 1600px) { + .header-container { + margin-top: 0; + } +} + +.container-fluid.header-holder { + padding-right: 0; + padding-left: 0; +} + +.header-holder .container { + max-width: none; + padding-right: 1.875rem; + padding-left: 1.875rem; +} +@media screen and (min-width: 1101px) { + .header-holder .container { + padding-right: 1.875rem; + padding-left: 1.875rem; + } +} + +.header-holder .main-menu { + -webkit-box-pack: unset; + -ms-flex-pack: unset; + justify-content: unset; + position: relative; +} +@media screen and (min-width: 1101px) { + .header-holder .main-menu ul { + padding-left: 0; + margin-left: 26%; + } +} +@media screen and (min-width: 1600px) { + .header-holder .main-menu ul { + padding-left: 38px; + margin-left: 310px; + } +} + +.pytorch-page-level-bar { + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; + width: 100%; + z-index: 201; +} +@media screen and (min-width: 1101px) { + .pytorch-page-level-bar { + left: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 45px; + 
padding-left: 0; + width: 100%; + position: absolute; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + position: fixed; + top: 0; + left: 25%; + padding-left: 0; + right: 0; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-page-level-bar { + left: 0; + right: 0; + width: auto; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + left: 350px; + right: 0; + width: auto; + } +} +.pytorch-page-level-bar ul, .pytorch-page-level-bar li { + margin: 0; +} + +.pytorch-shortcuts-wrapper { + display: none; +} +@media screen and (min-width: 1101px) { + .pytorch-shortcuts-wrapper { + font-size: 0.875rem; + float: left; + margin-left: 2%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-shortcuts-wrapper { + margin-left: 1.875rem; + } +} + +.cookie-banner-wrapper { + display: none; +} +.cookie-banner-wrapper .container { + padding-left: 1.875rem; + padding-right: 1.875rem; + max-width: 1240px; +} +.cookie-banner-wrapper.is-visible { + display: block; + position: fixed; + bottom: 0; + background-color: #f3f4f7; + min-height: 100px; + width: 100%; + z-index: 401; + border-top: 3px solid #ededee; +} +.cookie-banner-wrapper .gdpr-notice { + color: #6c6c6d; + margin-top: 1.5625rem; + text-align: left; + max-width: 1440px; +} +@media screen and (min-width: 768px) { + .cookie-banner-wrapper .gdpr-notice { + width: 77%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .gdpr-notice { + width: inherit; + } +} +.cookie-banner-wrapper .gdpr-notice .cookie-policy-link { + color: #343434; +} +.cookie-banner-wrapper .close-button { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background: transparent; + border: 1px solid #f3f4f7; + height: 1.3125rem; + position: absolute; + bottom: 42px; + right: 0; + top: 0; + cursor: pointer; + outline: none; +} +@media screen and (min-width: 768px) { + .cookie-banner-wrapper .close-button { + right: 20%; + top: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .close-button { + right: 0; + top: 0; + } +} + +.main-menu ul li .ecosystem-dropdown, .main-menu ul li .resources-dropdown a { + cursor: pointer; +} +.main-menu ul li .dropdown-menu { + border-radius: 0; + padding: 0; +} +.main-menu ul li .dropdown-menu .dropdown-item { + color: #6c6c6d; + border-bottom: 1px solid #e2e2e2; +} +.main-menu ul li .dropdown-menu .dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.main-menu ul li .dropdown-menu .dropdown-item:hover { + background-color: #e44c2c; +} +.main-menu ul li .dropdown-menu .dropdown-item p { + font-size: 1rem; + color: #979797; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover { + color: #ffffff; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover p { + color: #ffffff; +} + +.ecosystem-dropdown-menu, .resources-dropdown-menu { + left: -75px; + width: 226px; + display: none; + position: absolute; + top: 3.125rem; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + margin: 0.125rem 0 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #ffffff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.ecosystem-dropdown-menu.show-menu, .resources-dropdown-menu.show-menu { + display: block; +} + +.main-menu ul li .ecosystem-dropdown-menu, .main-menu ul li .resources-dropdown-menu { + border-radius: 0; + padding: 0; +} + +.main-menu ul li 
.ecosystem-dropdown-menu .dropdown-item, .main-menu ul li .resources-dropdown-menu .dropdown-item { + color: #6c6c6d; + border-bottom: 1px solid #e2e2e2; +} + +.header-holder .main-menu ul li a.nav-dropdown-item { + display: block; + font-size: 1rem; + line-height: 1.3125rem; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #979797; + text-align: center; + background-color: transparent; + border-bottom: 1px solid #e2e2e2; +} +.header-holder .main-menu ul li a.nav-dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.header-holder .main-menu ul li a.nav-dropdown-item:hover { + background-color: #e44c2c; + color: white; +} +.header-holder .main-menu ul li a.nav-dropdown-item .dropdown-title { + font-size: 1.125rem; + color: #6c6c6d; + letter-spacing: 0; + line-height: 34px; +} + +.header-holder .main-menu ul li a.nav-dropdown-item:hover .dropdown-title { + background-color: #e44c2c; + color: white; +} + +/*# sourceMappingURL=theme.css.map */ \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/doctools.js b/docs/1.7.0/torchvision/_static/doctools.js new file mode 100644 index 000000000000..d8928926bf24 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/doctools.js @@ -0,0 +1,313 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? 
singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); \ No newline at end of file diff --git 
a/docs/1.7.0/torchvision/_static/documentation_options.js b/docs/1.7.0/torchvision/_static/documentation_options.js new file mode 100644 index 000000000000..33543c23978a --- /dev/null +++ b/docs/1.7.0/torchvision/_static/documentation_options.js @@ -0,0 +1,9 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: '', + VERSION: '0.8.0.dev20201012', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt' +}; \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/down-pressed.png b/docs/1.7.0/torchvision/_static/down-pressed.png new file mode 100644 index 000000000000..5756c8cad885 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/down-pressed.png differ diff --git a/docs/1.7.0/torchvision/_static/down.png b/docs/1.7.0/torchvision/_static/down.png new file mode 100644 index 000000000000..1b3bdad2ceff Binary files /dev/null and b/docs/1.7.0/torchvision/_static/down.png differ diff --git a/docs/1.7.0/torchvision/_static/file.png b/docs/1.7.0/torchvision/_static/file.png new file mode 100644 index 000000000000..a858a410e4fa Binary files /dev/null and b/docs/1.7.0/torchvision/_static/file.png differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff new file mode 100644 index 000000000000..e317248423c7 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 new file mode 100644 index 000000000000..cec2dc94fbb5 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff new file mode 100644 index 000000000000..de46625edfc8 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff2 new file mode 100644 index 000000000000..dc05cd82bc4d Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-bold.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff new file mode 100644 index 000000000000..a50e5038a405 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff2 new file mode 100644 index 000000000000..fe284db6614a Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book-italic.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff new file mode 100644 index 000000000000..6ab8775f00b1 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff differ diff --git 
a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff2 new file mode 100644 index 000000000000..2688739f1f0b Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-book.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff new file mode 100644 index 000000000000..beda58d4e218 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff2 new file mode 100644 index 000000000000..e2fa0134b1a5 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light-italic.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff new file mode 100644 index 000000000000..226a0bf83583 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff2 new file mode 100644 index 000000000000..6d8ff2c045b0 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-light.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff new file mode 100644 index 000000000000..a42115d63b39 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 new file mode 100644 index 000000000000..16a7713a451a Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff new file mode 100644 index 000000000000..5ea34539c6f5 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff2 b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff2 new file mode 100644 index 000000000000..c58b6a528bb6 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/FreightSans/freight-sans-medium.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff new file mode 100644 index 000000000000..cf37a5c50bdb Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 new file mode 100644 index 000000000000..955a6eab5bb8 
Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff new file mode 100644 index 000000000000..fc65a679c226 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 new file mode 100644 index 000000000000..c352e40e34a3 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff new file mode 100644 index 000000000000..7d63d89f24bc Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 new file mode 100644 index 000000000000..d0d7ded90791 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff new file mode 100644 index 000000000000..1da7753cf283 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff differ diff --git a/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 new file mode 100644 index 000000000000..79dffdb85f74 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 differ diff --git a/docs/1.7.0/torchvision/_static/images/arrow-down-orange.svg b/docs/1.7.0/torchvision/_static/images/arrow-down-orange.svg new file mode 100644 index 000000000000..e9d8e9ecf248 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/arrow-down-orange.svg @@ -0,0 +1,19 @@ + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/images/arrow-right-with-tail.svg b/docs/1.7.0/torchvision/_static/images/arrow-right-with-tail.svg new file mode 100644 index 000000000000..5843588fca6f --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/arrow-right-with-tail.svg @@ -0,0 +1,19 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/images/chevron-down-grey.svg b/docs/1.7.0/torchvision/_static/images/chevron-down-grey.svg new file mode 100644 index 000000000000..82d6514f2506 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/chevron-down-grey.svg @@ -0,0 +1,18 @@ + + + + +Created with Sketch. + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/chevron-down-orange.svg b/docs/1.7.0/torchvision/_static/images/chevron-down-orange.svg new file mode 100644 index 000000000000..fd79a57854cb --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/chevron-down-orange.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. 
+ + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/chevron-right-orange.svg b/docs/1.7.0/torchvision/_static/images/chevron-right-orange.svg new file mode 100644 index 000000000000..7033fc93bf4f --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/chevron-right-orange.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/chevron-right-white.svg b/docs/1.7.0/torchvision/_static/images/chevron-right-white.svg new file mode 100644 index 000000000000..dd9e77f26165 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/chevron-right-white.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. + + + + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/images/home-footer-background.jpg b/docs/1.7.0/torchvision/_static/images/home-footer-background.jpg new file mode 100644 index 000000000000..b307bb57f485 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/images/home-footer-background.jpg differ diff --git a/docs/1.7.0/torchvision/_static/images/icon-close.svg b/docs/1.7.0/torchvision/_static/images/icon-close.svg new file mode 100644 index 000000000000..348964e79f7f --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/icon-close.svg @@ -0,0 +1,21 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/images/icon-menu-dots-dark.svg b/docs/1.7.0/torchvision/_static/images/icon-menu-dots-dark.svg new file mode 100644 index 000000000000..fa2ad044b3f6 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/icon-menu-dots-dark.svg @@ -0,0 +1,42 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/images/logo-dark.svg b/docs/1.7.0/torchvision/_static/images/logo-dark.svg new file mode 100644 index 000000000000..9b4c1a56ac65 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/logo-dark.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/logo-facebook-dark.svg b/docs/1.7.0/torchvision/_static/images/logo-facebook-dark.svg new file mode 100644 index 000000000000..cff17915c4f5 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/logo-facebook-dark.svg @@ -0,0 +1,8 @@ + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/logo-icon.svg b/docs/1.7.0/torchvision/_static/images/logo-icon.svg new file mode 100644 index 000000000000..575f6823e476 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/logo-icon.svg @@ -0,0 +1,12 @@ + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/logo-twitter-dark.svg b/docs/1.7.0/torchvision/_static/images/logo-twitter-dark.svg new file mode 100644 index 000000000000..1572570f88cc --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/logo-twitter-dark.svg @@ -0,0 +1,16 @@ + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/logo-youtube-dark.svg b/docs/1.7.0/torchvision/_static/images/logo-youtube-dark.svg new file mode 100644 index 000000000000..e3cfedd79d14 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/logo-youtube-dark.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/logo.svg b/docs/1.7.0/torchvision/_static/images/logo.svg new file mode 100644 index 000000000000..f8d44b98425f --- /dev/null +++ 
b/docs/1.7.0/torchvision/_static/images/logo.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/pytorch-colab.svg b/docs/1.7.0/torchvision/_static/images/pytorch-colab.svg new file mode 100644 index 000000000000..2ab15e2f3071 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/pytorch-colab.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/pytorch-download.svg b/docs/1.7.0/torchvision/_static/images/pytorch-download.svg new file mode 100644 index 000000000000..cc37d638e926 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/pytorch-download.svg @@ -0,0 +1,10 @@ + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/pytorch-github.svg b/docs/1.7.0/torchvision/_static/images/pytorch-github.svg new file mode 100644 index 000000000000..2c2570da1de9 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/pytorch-github.svg @@ -0,0 +1,15 @@ + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/pytorch-x.svg b/docs/1.7.0/torchvision/_static/images/pytorch-x.svg new file mode 100644 index 000000000000..74856ea9fdae --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/pytorch-x.svg @@ -0,0 +1,10 @@ + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/search-icon.svg b/docs/1.7.0/torchvision/_static/images/search-icon.svg new file mode 100644 index 000000000000..ebb0df867733 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/search-icon.svg @@ -0,0 +1,19 @@ + + + + Created with Sketch. + + + + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/images/view-page-source-icon.svg b/docs/1.7.0/torchvision/_static/images/view-page-source-icon.svg new file mode 100644 index 000000000000..6f5bbe0748fc --- /dev/null +++ b/docs/1.7.0/torchvision/_static/images/view-page-source-icon.svg @@ -0,0 +1,13 @@ + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.png b/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.png new file mode 100644 index 000000000000..0288a564e227 Binary files /dev/null and b/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.png differ diff --git a/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.svg b/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.svg new file mode 100644 index 000000000000..717a3ce942f8 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/img/pytorch-logo-dark.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + diff --git a/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.png b/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.png new file mode 100644 index 000000000000..370633f2ec2b Binary files /dev/null and b/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.png differ diff --git a/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.svg b/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.svg new file mode 100644 index 000000000000..22d7228b4fa9 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/img/pytorch-logo-flame.svg @@ -0,0 +1,33 @@ + +image/svg+xml \ No newline at end of file diff --git a/docs/1.7.0/torchvision/_static/jquery-3.2.1.js b/docs/1.7.0/torchvision/_static/jquery-3.2.1.js new file mode 100644 index 000000000000..d2d8ca4790e5 --- /dev/null +++ b/docs/1.7.0/torchvision/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! 
+ * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from 
the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? 
+ [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
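+// (Editor's annotation, not upstream Sizzle code: markFunction below sets a per-instance expando +// property on a function; Sizzle later checks fn[ expando ] to recognise such specially-marked +// pseudo implementations, e.g. those returned by createPositionalPseudo further down.)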
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
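+ // (Editor's annotation, not upstream Sizzle code: when such an element is not itself disabled, + // the branch below falls back to the disabledAncestor combinator defined earlier in this Sizzle + // block, which walks parentNode links to detect nesting inside a disabled fieldset.)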
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + 
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( el ) { + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; +}); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( el ) { + return el.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
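+//
+// Editor's sketch (illustrative only, not part of upstream jQuery; kept entirely in
+// comments so this vendored file's behavior is unchanged): assuming a page where a
+// console is available, the rerrorNames pattern declared just below is what
+// jQuery.Deferred.exceptionHook consults. When a .then() handler throws an error whose
+// name matches it, the exception still becomes a rejection of the promise returned by
+// .then(), but a console warning is emitted rather than the failure staying silent, e.g.:
+//
+//     var d = jQuery.Deferred();
+//     d.then( function() { throw new TypeError( "boom" ); } );  // "TypeError" matches
+//     d.resolve();  // handler runs async; exceptionHook warns, returned promise rejects
+//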
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? 
+ [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
+ // Use string for doubling so we don't accidentally see scale as unchanged below + scale = scale || ".5"; + + // Adjust and apply + initialInUnit = initialInUnit / scale; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Update scale, tolerating zero or NaN from tween.cur() + // Break the loop if scale is unchanged or perfect, or if we've just had enough. + } while ( + scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations + ); + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); + +var rscriptType = ( /^$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps 
(WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( ">tbody", elem )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + + if ( match ) { + elem.type = match[ 1 ]; + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? "border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
+ swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + torchvision.datasets — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

torchvision.datasets

+

All datasets are subclasses of torch.utils.data.Dataset +i.e., they have __getitem__ and __len__ methods implemented. +Hence, they can all be passed to a torch.utils.data.DataLoader +which can load multiple samples in parallel using torch.multiprocessing workers. +For example:

+
imagenet_data = torchvision.datasets.ImageNet('path/to/imagenet_root/')
+data_loader = torch.utils.data.DataLoader(imagenet_data,
+                                          batch_size=4,
+                                          shuffle=True,
+                                          num_workers=args.nThreads)
+
+
+

The following datasets are available:

+ +

All the datasets have an almost identical API. They all have two common arguments: +transform and target_transform to transform the input and the target respectively, as sketched below.

+
+
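For instance, a minimal sketch using MNIST (the ./data path, download=True, and the odd/even target_transform are illustrative, not part of the dataset definition):

import torchvision.datasets as datasets
import torchvision.transforms as transforms

# transform is applied to the input image, target_transform to the integer label
mnist = datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor(),
                       target_transform=lambda y: y % 2)  # hypothetical: collapse labels to odd/even

img, target = mnist[0]   # img: 1x28x28 tensor, target: 0 or 1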

CelebA

+
+
+class torchvision.datasets.CelebA(root: str, split: str = 'train', target_type: Union[List[str], str] = 'attr', transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

Large-scale CelebFaces Attributes (CelebA) Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are downloaded to.
  • +
  • split (string) – One of {‘train’, ‘valid’, ‘test’, ‘all’}. +The corresponding subset of the dataset is selected.
  • +
  • target_type (string or list, optional) – Type of target to use: attr, identity, bbox,
    or landmarks. Can also be a list to output a tuple with all specified target types.
    The targets represent:

      attr (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
      identity (int): label for each person (data points with the same identity are the same person)
      bbox (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
      landmarks (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
        righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)

    Defaults to attr. If empty, None will be returned as target.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.ToTensor
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+ +
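A rough usage sketch (the ./data root is a placeholder; download=True fetches the files if they are missing):

from torchvision import datasets, transforms

celeba = datasets.CelebA('./data', split='train',
                         target_type=['attr', 'identity'],
                         transform=transforms.ToTensor(),
                         download=True)

img, (attr, identity) = celeba[0]   # attr: 40 binary labels, identity: person id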
+
+

CIFAR

+
+
+class torchvision.datasets.CIFAR10(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

CIFAR10 Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where directory +cifar-10-batches-py exists or will be saved to if download is set to True.
  • +
  • train (bool, optional) – If True, creates dataset from training set, otherwise +creates from test set.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is index of the target class.
Return type:tuple
+
+ +
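Example (a minimal sketch; the ./data root and download=True are illustrative):

from torchvision import datasets, transforms

trainset = datasets.CIFAR10('./data', train=True, download=True,
                            transform=transforms.ToTensor())
testset = datasets.CIFAR10('./data', train=False, download=True,
                           transform=transforms.ToTensor())

img, label = trainset[0]   # img: 3x32x32 tensor, label: int in [0, 9]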
+ +
+
+class torchvision.datasets.CIFAR100(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

CIFAR100 Dataset.

+

This is a subclass of the CIFAR10 Dataset.

+
+ +
+
+

Cityscapes

+
+

Note

+

Requires the Cityscapes dataset to be downloaded.

+
+
+
+class torchvision.datasets.Cityscapes(root: str, split: str = 'train', mode: str = 'fine', target_type: Union[List[str], str] = 'instance', transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, transforms: Union[Callable, NoneType] = None) → None[source]
+

Cityscapes Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where directory leftImg8bit +and gtFine or gtCoarse are located.
  • +
  • split (string, optional) – The image split to use, train, test or val if mode=”fine” +otherwise train, train_extra or val
  • +
  • mode (string, optional) – The quality mode to use, fine or coarse
  • +
  • target_type (string or list, optional) – Type of target to use, instance, semantic, polygon +or color. Can also be a list to output a tuple with all specified target types.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version.
  • +
+
+

Examples

+

Get semantic segmentation target

+
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
+                     target_type='semantic')
+
+img, smnt = dataset[0]
+
+
+

Get multiple targets

+
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
+                     target_type=['instance', 'color', 'polygon'])
+
+img, (inst, col, poly) = dataset[0]
+
+
+

Validate on the “coarse” set

+
dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',
+                     target_type='semantic')
+
+img, smnt = dataset[0]
+
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is a tuple of all target types if target_type is a list with more +than one item. Otherwise target is a json object if target_type=”polygon”, else the image segmentation.
Return type:tuple
+
+ +
+ +
+
+

COCO

+
+

Note

+

These require the COCO API to be installed

+
+
+

Captions

+
+
+class torchvision.datasets.CocoCaptions(root: str, annFile: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, transforms: Union[Callable, NoneType] = None) → None[source]
+

MS Coco Captions Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are downloaded to.
  • +
  • annFile (string) – Path to json annotation file.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.ToTensor
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version.
  • +
+
+

Example

+
import torchvision.datasets as dset
+import torchvision.transforms as transforms
+cap = dset.CocoCaptions(root = 'dir where images are',
+                        annFile = 'json annotation file',
+                        transform=transforms.ToTensor())
+
+print('Number of samples: ', len(cap))
+img, target = cap[3] # load 4th sample
+
+print("Image Size: ", img.size())
+print(target)
+
+
+

Output:

+
Number of samples: 82783
+Image Size: (3L, 427L, 640L)
+[u'A plane emitting smoke stream flying over a mountain.',
+u'A plane darts across a bright blue sky behind a mountain covered in snow',
+u'A plane leaves a contrail above the snowy mountain top.',
+u'A mountain that has a plane flying overheard in the distance.',
+u'A mountain view with a plume of smoke in the background']
+
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:Tuple (image, target). target is a list of captions for the image.
Return type:tuple
+
+ +
+ +
+
+

Detection

+
+
+class torchvision.datasets.CocoDetection(root: str, annFile: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, transforms: Union[Callable, NoneType] = None) → None[source]
+

MS Coco Detection Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are downloaded to.
  • +
  • annFile (string) – Path to json annotation file.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.ToTensor
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:Tuple (image, target). target is the object returned by coco.loadAnns.
Return type:tuple
+
+ +
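Example (mirroring the CocoCaptions sketch above; the root and annFile paths are placeholders, and the COCO API must be installed):

import torchvision.datasets as dset
import torchvision.transforms as transforms

det = dset.CocoDetection(root='dir where images are',
                         annFile='json annotation file',
                         transform=transforms.ToTensor())

img, anns = det[0]   # anns: list of annotation dicts returned by coco.loadAnns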
+ +
+
+
+

DatasetFolder

+
+
+class torchvision.datasets.DatasetFolder(root: str, loader: Callable[[str], Any], extensions: Union[Tuple[str, ...], NoneType] = None, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, is_valid_file: Union[Callable[[str], bool], NoneType] = None) → None[source]
+

A generic data loader where the samples are arranged in this way:

+
root/class_x/xxx.ext
+root/class_x/xxy.ext
+root/class_x/xxz.ext
+
+root/class_y/123.ext
+root/class_y/nsdf3.ext
+root/class_y/asd932_.ext
+
+
+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory path.
  • +
  • loader (callable) – A function to load a sample given its path.
  • +
  • extensions (tuple[string]) – A list of allowed extensions. +Both extensions and is_valid_file should not be passed at the same time.
  • +
  • transform (callable, optional) – A function/transform that takes in +a sample and returns a transformed version. +E.g, transforms.RandomCrop for images.
  • +
  • target_transform (callable, optional) – A function/transform that takes +in the target and transforms it.
  • +
  • is_valid_file – A function that takes the path of a file +and checks whether it is a valid file (used to filter out corrupt files). +Both extensions and is_valid_file should not be passed at the same time.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(sample, target) where target is class_index of the target class.
Return type:tuple
+
+ +
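A minimal sketch of a non-image use case, assuming samples are stored as .pt tensor files under root/class_x/…; the path and extension are illustrative:

import torch
from torchvision.datasets import DatasetFolder

# torch.load acts as the loader; only files ending in '.pt' are picked up
dataset = DatasetFolder('path/to/root', loader=torch.load, extensions=('.pt',))
sample, class_index = dataset[0]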
+ +
+
+

EMNIST

+
+
+class torchvision.datasets.EMNIST(root: str, split: str, **kwargs) → None[source]
+

EMNIST Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where EMNIST/processed/training.pt +and EMNIST/processed/test.pt exist.
  • +
  • split (string) – The dataset has 6 different splits: byclass, bymerge, +balanced, letters, digits and mnist. This argument specifies +which one to use.
  • +
  • train (bool, optional) – If True, creates dataset from training.pt, +otherwise from test.pt.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+ +
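Example (a minimal sketch; the ./data root and download=True are illustrative):

from torchvision import datasets, transforms

emnist = datasets.EMNIST('./data', split='letters', train=True, download=True,
                         transform=transforms.ToTensor())
img, label = emnist[0]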
+
+

FakeData

+
+
+class torchvision.datasets.FakeData(size: int = 1000, image_size: Tuple[int, int, int] = (3, 224, 224), num_classes: int = 10, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, random_offset: int = 0) → None[source]
+

A fake dataset that generates random images and returns them as PIL images.

+ +++ + + + +
Parameters:
    +
  • size (int, optional) – Size of the dataset. Default: 1000 images
  • +
  • image_size (tuple, optional) – Size of the returned images. Default: (3, 224, 224)
  • +
  • num_classes (int, optional) – Number of classes in the dataset. Default: 10
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • random_offset (int) – Offsets the index-based random seed used to +generate each image. Default: 0
  • +
+
+
+ +
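Because FakeData needs no files on disk, it is convenient for smoke-testing an input pipeline. A minimal sketch:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

fake = datasets.FakeData(size=100, image_size=(3, 224, 224), num_classes=10,
                         transform=transforms.ToTensor())
loader = DataLoader(fake, batch_size=8)
images, labels = next(iter(loader))   # images: 8x3x224x224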
+
+

Fashion-MNIST

+
+
+class torchvision.datasets.FashionMNIST(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

Fashion-MNIST Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where FashionMNIST/processed/training.pt +and FashionMNIST/processed/test.pt exist.
  • +
  • train (bool, optional) – If True, creates dataset from training.pt, +otherwise from test.pt.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+ +
+
+

Flickr

+
+
+class torchvision.datasets.Flickr8k(root: str, ann_file: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None) → None[source]
+

Flickr8k Entities Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are downloaded to.
  • +
  • ann_file (string) – Path to annotation file.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g, transforms.ToTensor
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:Tuple (image, target). target is a list of captions for the image.
Return type:tuple
+
+ +
+ +
+
+class torchvision.datasets.Flickr30k(root: str, ann_file: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None) → None[source]
+

Flickr30k Entities Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are downloaded to.
  • +
  • ann_file (string) – Path to annotation file.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g, transforms.ToTensor
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:Tuple (image, target). target is a list of captions for the image.
Return type:tuple
+
+ +
+ +
+
+

HMDB51

+
+
+class torchvision.datasets.HMDB51(root, annotation_path, frames_per_clip, step_between_clips=1, frame_rate=None, fold=1, train=True, transform=None, _precomputed_metadata=None, num_workers=1, _video_width=0, _video_height=0, _video_min_dimension=0, _audio_samples=0)[source]
+

HMDB51 +dataset.

+

HMDB51 is an action recognition video dataset. +This dataset considers every video as a collection of video clips of fixed size, specified +by frames_per_clip, where the step in frames between each clip is given by +step_between_clips.

+

To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 +and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two +elements will come from video 1, and the next three elements from video 2. +Note that we drop clips which do not have exactly frames_per_clip elements, so not all +frames in a video might be present.

+

Internally, it uses a VideoClips object to handle clip creation.

+ +++ + + + + + + + +
Parameters:
    +
  • root (string) – Root directory of the HMDB51 Dataset.
  • +
  • annotation_path (str) – Path to the folder containing the split files.
  • +
  • frames_per_clip (int) – Number of frames in a clip.
  • +
  • step_between_clips (int) – Number of frames between each clip.
  • +
  • fold (int, optional) – Which fold to use. Should be between 1 and 3.
  • +
  • train (bool, optional) – If True, creates a dataset from the train split, +otherwise from the test split.
  • +
  • transform (callable, optional) – A function/transform that takes in a TxHxWxC video +and returns a transformed version.
  • +
+
Returns:

    video (Tensor[T, H, W, C]): the T video frames
    audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points
    label (int): class of the video clip
+
+
+ +
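A rough usage sketch (both paths are placeholders; the HMDB51 videos and the official split files must be downloaded separately, and a video backend such as PyAV must be available):

from torchvision import datasets

hmdb = datasets.HMDB51('path/to/hmdb51/videos', 'path/to/split/files',
                       frames_per_clip=16, step_between_clips=16,
                       fold=1, train=True)

video, audio, label = hmdb[0]   # video: Tensor[T, H, W, C]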
+
+

ImageFolder

+
+
+class torchvision.datasets.ImageFolder(root: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, loader: Callable[[str], Any] = <function default_loader>, is_valid_file: Union[Callable[[str], bool], NoneType] = None)[source]
+

A generic data loader where the images are arranged in this way:

+
root/dog/xxx.png
+root/dog/xxy.png
+root/dog/xxz.png
+
+root/cat/123.png
+root/cat/nsdf3.png
+root/cat/asd932_.png
+
+
+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory path.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • loader (callable, optional) – A function to load an image given its path.
  • +
  • is_valid_file – A function that takes the path of an image file +and checks whether it is a valid file (used to filter out corrupt files)
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(sample, target) where target is class_index of the target class.
Return type:tuple
+
+ +
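Example (a minimal sketch; the root path is a placeholder and must follow the layout shown above):

from torchvision import datasets, transforms

dataset = datasets.ImageFolder('path/to/root',
                               transform=transforms.ToTensor())

img, class_index = dataset[0]
print(dataset.classes)   # class names inferred from the folder names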
+ +
+
+

ImageNet

+
+
+class torchvision.datasets.ImageNet(root: str, split: str = 'train', download: Union[str, NoneType] = None, **kwargs) → None[source]
+

ImageNet 2012 Classification Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of the ImageNet Dataset.
  • +
  • split (string, optional) – The dataset split, supports train, or val.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g., transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • loader – A function to load an image given its path.
  • +
+
+
+ +
+

Note

+

This requires scipy to be installed

+
+
+
+

Kinetics-400

+
+
+class torchvision.datasets.Kinetics400(root, frames_per_clip, step_between_clips=1, frame_rate=None, extensions=('avi', ), transform=None, _precomputed_metadata=None, num_workers=1, _video_width=0, _video_height=0, _video_min_dimension=0, _audio_samples=0, _audio_channels=0)[source]
+

Kinetics-400 +dataset.

+

Kinetics-400 is an action recognition video dataset. +This dataset considers every video as a collection of video clips of fixed size, specified +by frames_per_clip, where the step in frames between each clip is given by +step_between_clips.

+

To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 +and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two +elements will come from video 1, and the next three elements from video 2. +Note that we drop clips which do not have exactly frames_per_clip elements, so not all +frames in a video might be present.

+

Internally, it uses a VideoClips object to handle clip creation.

+ +++ + + + + + + + +
Parameters:
    +
  • root (string) – Root directory of the Kinetics-400 Dataset.
  • +
  • frames_per_clip (int) – number of frames in a clip
  • +
  • step_between_clips (int) – number of frames between each clip
  • +
  • transform (callable, optional) – A function/transform that takes in a TxHxWxC video +and returns a transformed version.
  • +
+
Returns:

    video (Tensor[T, H, W, C]): the T video frames
    audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points
    label (int): class of the video clip
+
+
+ +
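A rough usage sketch (the root path is a placeholder pointing at one folder per class with video files inside; a video backend such as PyAV must be available):

from torchvision import datasets

kinetics = datasets.Kinetics400('path/to/kinetics400/train',
                                frames_per_clip=16, step_between_clips=16)

video, audio, label = kinetics[0]   # video: Tensor[T, H, W, C]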
+
+

KMNIST

+
+
+class torchvision.datasets.KMNIST(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

Kuzushiji-MNIST Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where KMNIST/processed/training.pt +and KMNIST/processed/test.pt exist.
  • +
  • train (bool, optional) – If True, creates dataset from training.pt, +otherwise from test.pt.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+ +
+
+

LSUN

+
+
+class torchvision.datasets.LSUN(root: str, classes: Union[str, List[str]] = 'train', transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None) → None[source]
+

LSUN dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory for the database files.
  • +
  • classes (string or list) – One of {‘train’, ‘val’, ‘test’} or a list of +categories to load, e.g. [‘bedroom_train’, ‘church_outdoor_train’].
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:Tuple (image, target) where target is the index of the target category.
Return type:tuple
+
+ +
+ +
+
+

MNIST

+
+
+class torchvision.datasets.MNIST(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

MNIST Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where MNIST/processed/training.pt +and MNIST/processed/test.pt exist.
  • +
  • train (bool, optional) – If True, creates dataset from training.pt, +otherwise from test.pt.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
+
+
+ +
+
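All of these datasets can be wrapped in a torch.utils.data.DataLoader; a minimal sketch with MNIST (the "data" directory is a hypothetical download location):

import torch
from torchvision import datasets, transforms

# downloads MNIST into ./data on first use
train_set = datasets.MNIST("data", train=True, download=True,
                           transform=transforms.ToTensor())

loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
images, targets = next(iter(loader))   # images: [64, 1, 28, 28], targets: [64]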
+

Omniglot

+
+
+class torchvision.datasets.Omniglot(root: str, background: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

Omniglot Dataset.

+
+
+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where directory omniglot-py exists.
  • +
  • background (bool, optional) – If True, creates dataset from the “background” set, otherwise +creates from the “evaluation” set. This terminology is defined by the authors.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset zip files from the internet and +puts it in root directory. If the zip files are already downloaded, they are not +downloaded again.
  • +
+
+
+ +
+
+

PhotoTour

+
+
+class torchvision.datasets.PhotoTour(root: str, name: str, train: bool = True, transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

Learning Local Image Descriptors Data Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory where images are.
  • +
  • name (string) – Name of the dataset to load.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(data1, data2, matches)
Return type:tuple
+
+ +
+ +
+
+

Places365

+
+
+class torchvision.datasets.Places365(root: str, split: str = 'train-standard', small: bool = False, download: bool = False, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, loader: Callable[[str], Any] = <function default_loader>) → None[source]
+

Places365 classification dataset.

+ +++ + + + + + +
Parameters:
    +
  • root (string) – Root directory of the Places365 dataset.
  • +
  • split (string, optional) – The dataset split. Can be one of train-standard (default), train-challenge, +val.
  • +
  • small (bool, optional) – If True, uses the small images, i.e. resized to 256 x 256 pixels, instead of the +high resolution ones.
  • +
  • download (bool, optional) – If True, downloads the dataset components and places them in root. Already +downloaded archives are not downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • loader – A function to load an image given its path.
  • +
+
Raises:
    +
  • RuntimeError – If download is False and the meta files, i. e. the devkit, are not present or corrupted.
  • +
  • RuntimeError – If download is True and the image archive is already extracted.
  • +
+
+
+ +
+
+

QMNIST

+
+
+class torchvision.datasets.QMNIST(root: str, what: Union[str, NoneType] = None, compat: bool = True, train: bool = True, **kwargs) → None[source]
+

QMNIST Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset whose processed +subdirectory contains the torch binary files with the datasets.
  • +
  • what (string, optional) – Can be ‘train’, ‘test’, ‘test10k’, ‘test50k’, or ‘nist’, selecting respectively the MNIST-compatible training set, the 60k QMNIST testing set, the 10k QMNIST examples that match the MNIST testing set, the 50k remaining QMNIST testing examples, or all the NIST digits. The default is to select ‘train’ or ‘test’ according to the compatibility argument ‘train’.
  • +
  • compat (bool, optional) – Whether the target for each example is the class number (for compatibility with the MNIST data loader) or a torch vector containing the full QMNIST information. Default=True.
  • +
  • download (bool, optional) – If true, downloads the dataset from +the internet and puts it in root directory. If dataset is +already downloaded, it is not downloaded again.
  • +
  • transform (callable, optional) – A function/transform that +takes in an PIL image and returns a transformed +version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform +that takes in the target and transforms it.
  • +
  • train (bool, optional, compatibility) – When the argument ‘what’ is +not specified, this boolean decides whether to load the +training set or the testing set. Default: True.
  • +
+
+
+ +
+
+

SBD

+
+
+class torchvision.datasets.SBDataset(root: str, image_set: str = 'train', mode: str = 'boundaries', download: bool = False, transforms: Union[Callable, NoneType] = None) → None[source]
+

Semantic Boundaries Dataset

+

The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.

+
+

Note

+

Please note that the train and val splits included with this dataset are different from +the splits in the PASCAL VOC dataset. In particular some “train” images might be part of +VOC2012 val. +If you are interested in testing on VOC 2012 val, then use image_set=’train_noval’, +which excludes all val images.

+
+
+

Warning

+

This class needs scipy to load target files from .mat format.

+
+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of the Semantic Boundaries Dataset
  • +
  • image_set (string, optional) – Select the image_set to use, train, val or train_noval. +Image set train_noval excludes VOC 2012 val images.
  • +
  • mode (string, optional) – Select target type. Possible values ‘boundaries’ or ‘segmentation’. +In case of ‘boundaries’, the target is an array of shape [num_classes, H, W], +where num_classes=20.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version. Input sample is PIL image and target is a numpy array +if mode=’boundaries’ or PIL image if mode=’segmentation’.
  • +
+
+
+ +
+
+

SBU

+
+
+class torchvision.datasets.SBU(root: str, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = True) → None[source]
+

SBU Captioned Photo Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where tarball +SBUCaptionedPhotoDataset.tar.gz exists.
  • +
  • transform (callable, optional) – A function/transform that takes in a PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If True, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is a caption for the photo.
Return type:tuple
+
+ +
+ +
+
+

STL10

+
+
+class torchvision.datasets.STL10(root: str, split: str = 'train', folds: Union[int, NoneType] = None, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

STL10 Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where directory +stl10_binary exists.
  • +
  • split (string) – One of {‘train’, ‘test’, ‘unlabeled’, ‘train+unlabeled’}. +Accordingly dataset is selected.
  • +
  • folds (int, optional) – One of {0-9} or None. For training, loads one of the 10 pre-defined folds of 1k samples for the standard evaluation procedure. If no value is passed, loads the 5k samples.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is index of the target class.
Return type:tuple
+
+ +
+ +
+
+

SVHN

+
+
+class torchvision.datasets.SVHN(root: str, split: str = 'train', transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

SVHN Dataset. +Note: The SVHN dataset assigns the label 10 to the digit 0. However, in this Dataset, +we assign the label 0 to the digit 0 to be compatible with PyTorch loss functions which +expect the class labels to be in the range [0, C-1]

+
+

Warning

+

This class needs scipy to load data from .mat format.

+
+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset where directory +SVHN exists.
  • +
  • split (string) – One of {‘train’, ‘test’, ‘extra’}. +Accordingly dataset is selected. ‘extra’ is Extra training set.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is index of the target class.
Return type:tuple
+
+ +
+ +
+
+

UCF101

+
+
+class torchvision.datasets.UCF101(root, annotation_path, frames_per_clip, step_between_clips=1, frame_rate=None, fold=1, train=True, transform=None, _precomputed_metadata=None, num_workers=1, _video_width=0, _video_height=0, _video_min_dimension=0, _audio_samples=0)[source]
+

UCF101 dataset.

+

UCF101 is an action recognition video dataset. +This dataset considers every video as a collection of video clips of fixed size, specified +by frames_per_clip, where the step in frames between each clip is given by +step_between_clips.

+

To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 +and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two +elements will come from video 1, and the next three elements from video 2. +Note that we drop clips which do not have exactly frames_per_clip elements, so not all +frames in a video might be present.

+

Internally, it uses a VideoClips object to handle clip creation.

+ +++ + + + + + + + +
Parameters:
    +
  • root (string) – Root directory of the UCF101 Dataset.
  • +
  • annotation_path (str) – path to the folder containing the split files
  • +
  • frames_per_clip (int) – number of frames in a clip.
  • +
  • step_between_clips (int, optional) – number of frames between each clip.
  • +
  • fold (int, optional) – which fold to use. Should be between 1 and 3.
  • +
  • train (bool, optional) – if True, creates a dataset from the train split, +otherwise from the test split.
  • +
  • transform (callable, optional) – A function/transform that takes in a TxHxWxC video +and returns a transformed version.
  • +
+
Returns:

video (Tensor[T, H, W, C]): the T video frames
audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points
label (int): class of the video clip

+
Return type:

tuple

+
+
+ +
+
+

USPS

+
+
+class torchvision.datasets.USPS(root: str, train: bool = True, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, download: bool = False) → None[source]
+

USPS Dataset. +The data-format is : [label [index:value ]*256 n] * num_lines, where label lies in [1, 10]. +The value for each pixel lies in [-1, 1]. Here we transform the label into [0, 9] +and make pixel values in [0, 255].

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of dataset to store USPS data files.
  • +
  • train (bool, optional) – If True, creates dataset from usps.bz2, +otherwise from usps.t.bz2.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is index of the target class.
Return type:tuple
+
+ +
+ +
+
+

VOC

+
+
+class torchvision.datasets.VOCSegmentation(root: str, year: str = '2012', image_set: str = 'train', download: bool = False, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, transforms: Union[Callable, NoneType] = None)[source]
+

Pascal VOC Segmentation Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of the VOC Dataset.
  • +
  • year (string, optional) – The dataset year, supports years 2007 to 2012.
  • +
  • image_set (string, optional) – Select the image_set to use, train, trainval or val
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is the image segmentation.
Return type:tuple
+
+ +
+ +
+
+class torchvision.datasets.VOCDetection(root: str, year: str = '2012', image_set: str = 'train', download: bool = False, transform: Union[Callable, NoneType] = None, target_transform: Union[Callable, NoneType] = None, transforms: Union[Callable, NoneType] = None)[source]
+

Pascal VOC Detection Dataset.

+ +++ + + + +
Parameters:
    +
  • root (string) – Root directory of the VOC Dataset.
  • +
  • year (string, optional) – The dataset year, supports years 2007 to 2012.
  • +
  • image_set (string, optional) – Select the image_set to use, train, trainval or val
  • +
  • download (bool, optional) – If true, downloads the dataset from the internet and +puts it in root directory. If dataset is already downloaded, it is not +downloaded again.
  • +
  • transform (callable, optional) – A function/transform that takes in an PIL image +and returns a transformed version. E.g, transforms.RandomCrop
  • +
  • target_transform (callable, optional) – A function/transform that takes in the +target and transforms it.
  • +
  • transforms (callable, optional) – A function/transform that takes input sample and its target as entry +and returns a transformed version.
  • +
+
+
+
+__getitem__(index: int) → Tuple[Any, Any][source]
+
+++ + + + + + + + +
Parameters:index (int) – Index
Returns:(image, target) where target is a dictionary of the XML tree.
Return type:tuple
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/genindex.html b/docs/1.7.0/torchvision/genindex.html new file mode 100644 index 000000000000..e5ebd0c334a9 --- /dev/null +++ b/docs/1.7.0/torchvision/genindex.html @@ -0,0 +1,1181 @@ + + + + + + + + + + + + + Index — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ + +

Index

+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2017, Torch Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/index.html b/docs/1.7.0/torchvision/index.html new file mode 100644 index 000000000000..eca327faec3c --- /dev/null +++ b/docs/1.7.0/torchvision/index.html @@ -0,0 +1,701 @@ + + + + + + + + + + + + torchvision — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchvision

+

This library is part of the PyTorch project. PyTorch is an open source +machine learning framework.

+

Features described in this documentation are classified by release status:

+
+

Stable: These features will be maintained long-term and there should generally +be no major performance limitations or gaps in documentation. +We also expect to maintain backwards compatibility (although +breaking changes can happen and notice will be given one release ahead +of time).

+

Beta: Features are tagged as Beta because the API may change based on +user feedback, because the performance needs to improve, or because +coverage across operators is not yet complete. For Beta features, we are +committing to seeing the feature through to the Stable classification. +We are not, however, committing to backwards compatibility.

+

Prototype: These features are typically not available as part of +binary distributions like PyPI or Conda, except sometimes behind run-time +flags, and are at an early stage for feedback and testing.

+
+

The torchvision package consists of popular datasets, model +architectures, and common image transformations for computer vision.

+ +
+
+torchvision.get_image_backend()[source]
+

Gets the name of the package used to load images

+
+ +
+
+torchvision.set_image_backend(backend)[source]
+

Specifies the package used to load images.

+ +++ + + + +
Parameters:backend (string) – Name of the image backend. one of {‘PIL’, ‘accimage’}. +The accimage package uses the Intel IPP library. It is +generally faster than PIL, but does not support as many operations.
+
+ +
+
+torchvision.set_video_backend(backend)[source]
+

Specifies the package used to decode videos.

+ +++ + + + +
Parameters:backend (string) – Name of the video backend. one of {‘pyav’, ‘video_reader’}. +The pyav package uses the 3rd party PyAv library. It is a Pythonic +binding for the FFmpeg libraries. +The video_reader package includes a native C++ implementation on +top of FFMPEG libraries, and a Python API of TorchScript custom operators. +It generally decodes faster than pyav, but is perhaps less robust.
+
+ + +
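A minimal sketch of querying and switching backends (switching to ‘accimage’ or ‘video_reader’ only succeeds if the corresponding package or ops are available):

import torchvision

print(torchvision.get_image_backend())   # 'PIL' by default
torchvision.set_image_backend("PIL")     # or "accimage", if installed

torchvision.set_video_backend("pyav")    # or "video_reader", if compiled in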
+ + +
+ +
+
+ + + + + + +
+ + + +
+

+ © Copyright 2017, Torch Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ + +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/io.html b/docs/1.7.0/torchvision/io.html new file mode 100644 index 000000000000..9bc945c12d2d --- /dev/null +++ b/docs/1.7.0/torchvision/io.html @@ -0,0 +1,827 @@ + + + + + + + + + + + + torchvision.io — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchvision.io

+

The torchvision.io package provides functions for performing IO +operations. They are currently specific to reading and writing video and +images.

+
+

Video

+
+
+torchvision.io.read_video(filename: str, start_pts: int = 0, end_pts: Union[float, NoneType] = None, pts_unit: str = 'pts') → Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]][source]
+

Reads a video from a file, returning both the video frames and +the audio frames

+ +++ + + + + + +
Parameters:
    +
  • filename (str) – path to the video file
  • +
  • start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional) – the start presentation time of the video
  • +
  • end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional) – the end presentation time
  • +
  • pts_unit (str, optional) – unit in which start_pts and end_pts values will be interpreted, either ‘pts’ or ‘sec’. Defaults to ‘pts’.
  • +
+
Returns:

    +
  • vframes (Tensor[T, H, W, C]) – the T video frames
  • +
  • aframes (Tensor[K, L]) – the audio frames, where K is the number of channels and L is the +number of points
  • +
  • info (Dict) – metadata for the video and audio. Can contain the fields video_fps (float) +and audio_fps (int)
  • +
+

+
+
+ +
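A minimal sketch of reading a two-second excerpt (the file name is a hypothetical path):

import torchvision

vframes, aframes, info = torchvision.io.read_video(
    "clip.mp4", start_pts=0, end_pts=2.0, pts_unit="sec"
)
print(vframes.shape)   # [T, H, W, C] uint8 video frames
print(aframes.shape)   # [K, L] audio samples
print(info)            # e.g. {'video_fps': 30.0, 'audio_fps': 44100}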
+
+torchvision.io.read_video_timestamps(filename: str, pts_unit: str = 'pts') → Tuple[List[int], Union[float, NoneType]][source]
+

List the video frames timestamps.

+

Note that the function decodes the whole video frame-by-frame.

+ +++ + + + + + +
Parameters:
    +
  • filename (str) – path to the video file
  • +
  • pts_unit (str, optional) – unit in which timestamp values will be returned either ‘pts’ or ‘sec’. Defaults to ‘pts’.
  • +
+
Returns:

    +
  • pts (List[int] if pts_unit = ‘pts’, List[Fraction] if pts_unit = ‘sec’) – presentation timestamps for each one of the frames in the video.
  • +
  • video_fps (float, optional) – the frame rate for the video
  • +
+

+
+
+ +
+
+torchvision.io.write_video(filename: str, video_array: torch.Tensor, fps: float, video_codec: str = 'libx264', options: Union[Dict[str, Any], NoneType] = None) → None[source]
+

Writes a 4d tensor in [T, H, W, C] format to a video file

+ +++ + + + +
Parameters:
    +
  • filename (str) – path where the video will be saved
  • +
  • video_array (Tensor[T, H, W, C]) – tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format
  • +
  • fps (Number) – frames per second
  • +
+
+
+ +
+
+

Fine-grained video API

+

In addition to the read_video function, we provide a high-performance +lower-level API for more fine-grained control compared to the read_video function. +It does all this whilst fully supporting torchscript.

+

Example of inspecting a video:

+
import torchvision
+video_path = "path to a test video"
+# Constructor allocates memory and a threaded decoder
+# instance per video. At the moment it takes two arguments:
+# path to the video file, and a wanted stream.
+reader = torchvision.io.VideoReader(video_path, "video")
+
+# The information about the video can be retrieved using the
+# `get_metadata()` method. It returns a dictionary for every stream, with
+# duration and other relevant metadata (often frame rate)
+reader_md = reader.get_metadata()
+
+# metadata is structured as a dict of dicts with following structure
+# {"stream_type": {"attribute": [attribute per stream]}}
+#
+# following would print out the list of frame rates for every present video stream
+print(reader_md["video"]["fps"])
+
+# we explicitly select the stream we would like to operate on. In
+# the constructor we select a default video stream, but
+# in practice, we can set whichever stream we would like
+reader.set_current_stream("video:0")
+
+
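Continuing the example above, the reader can be iterated to pull decoded frames from the selected stream. This is a sketch of the beta API and assumes each decoded item is a dict carrying the frame tensor under "data" (and its timestamp under "pts"):

import itertools

# collect the first 32 decoded frames from the currently selected stream
frames = [frame["data"] for frame in itertools.islice(reader, 32)]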
+
+
+

Image

+
+
+torchvision.io.read_image(path: str) → torch.Tensor[source]
+

Reads a JPEG or PNG image into a 3 dimensional RGB Tensor. +The values of the output tensor are uint8 between 0 and 255.

+ +++ + + + + + + + +
Parameters:path (str) – path of the JPEG or PNG image.
Returns:output
Return type:Tensor[3, image_height, image_width]
+
+ +
+
+torchvision.io.decode_image(input: torch.Tensor) → torch.Tensor[source]
+

Detects whether an image is a JPEG or PNG and performs the appropriate +operation to decode the image into a 3 dimensional RGB Tensor.

+

The values of the output tensor are uint8 between 0 and 255.

+ +++ + + + + + + + +
Parameters:input (Tensor) – a one dimensional uint8 tensor containing the raw bytes of the +PNG or JPEG image.
Returns:output
Return type:Tensor[3, image_height, image_width]
+
+ +
+
+torchvision.io.encode_jpeg(input: torch.Tensor, quality: int = 75) → torch.Tensor[source]
+

Takes an input tensor in CHW layout and returns a buffer with the contents +of its corresponding JPEG file.

+ +++ + + + + + + + +
Parameters:
    +
  • input (Tensor[channels, image_height, image_width]) – int8 image tensor of c channels, where c must be 1 or 3.
  • +
  • quality (int) – Quality of the resulting JPEG file, it must be a number between +1 and 100. Default: 75
  • +
+
Returns:

output – A one dimensional int8 tensor that contains the raw bytes of the +JPEG file.

+
Return type:

Tensor[1]

+
+
+ +
+
+torchvision.io.write_jpeg(input: torch.Tensor, filename: str, quality: int = 75)[source]
+

Takes an input tensor in CHW layout and saves it in a JPEG file.

+ +++ + + + +
Parameters:
    +
  • input (Tensor[channels, image_height, image_width]) – int8 image tensor of c channels, where c must be 1 or 3.
  • +
  • filename (str) – Path to save the image.
  • +
  • quality (int) – Quality of the resulting JPEG file, it must be a number +between 1 and 100. Default: 75
  • +
+
+
+ +
+
+torchvision.io.encode_png(input: torch.Tensor, compression_level: int = 6) → torch.Tensor[source]
+

Takes an input tensor in CHW layout and returns a buffer with the contents +of its corresponding PNG file.

+ +++ + + + + + + + +
Parameters:
    +
  • input (Tensor[channels, image_height, image_width]) – int8 image tensor of c channels, where c must be 3 or 1.
  • +
  • compression_level (int) – Compression factor for the resulting file, it must be a number +between 0 and 9. Default: 6
  • +
+
Returns:

output – A one dimensional int8 tensor that contains the raw bytes of the +PNG file.

+
Return type:

Tensor[1]

+
+
+ +
+
+torchvision.io.write_png(input: torch.Tensor, filename: str, compression_level: int = 6)[source]
+

Takes an input tensor in CHW layout (or HW in the case of grayscale images) +and saves it in a PNG file.

+ +++ + + + +
Parameters:
    +
  • input (Tensor[channels, image_height, image_width]) – int8 image tensor of c channels, where c must be 1 or 3.
  • +
  • filename (str) – Path to save the image.
  • +
  • compression_level (int) – Compression factor for the resulting file, it must be a number +between 0 and 9. Default: 6
  • +
+
+
+ +
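A minimal round-trip sketch using these functions (the file names are hypothetical paths):

import numpy as np
import torch
from torchvision.io import read_image, decode_image, write_jpeg

img = read_image("input.png")             # Tensor[3, H, W], uint8
write_jpeg(img, "copy.jpg", quality=90)   # re-encode and save as JPEG

# decode_image operates on raw file bytes loaded into a 1-D uint8 tensor
raw = torch.from_numpy(np.fromfile("input.png", dtype=np.uint8))
img2 = decode_image(raw)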
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/models.html b/docs/1.7.0/torchvision/models.html new file mode 100644 index 000000000000..b58759e77f55 --- /dev/null +++ b/docs/1.7.0/torchvision/models.html @@ -0,0 +1,2199 @@ + + + + + + + + + + + + torchvision.models — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchvision.models

+

The models subpackage contains definitions of models for addressing +different tasks, including: image classification, pixelwise semantic +segmentation, object detection, instance segmentation, person +keypoint detection and video classification.

+
+

Classification

+

The models subpackage contains definitions for the following model +architectures for image classification:

+ +

You can construct a model with random weights by calling its constructor:

+
import torchvision.models as models
+resnet18 = models.resnet18()
+alexnet = models.alexnet()
+vgg16 = models.vgg16()
+squeezenet = models.squeezenet1_0()
+densenet = models.densenet161()
+inception = models.inception_v3()
+googlenet = models.googlenet()
+shufflenet = models.shufflenet_v2_x1_0()
+mobilenet = models.mobilenet_v2()
+resnext50_32x4d = models.resnext50_32x4d()
+wide_resnet50_2 = models.wide_resnet50_2()
+mnasnet = models.mnasnet1_0()
+
+
+

We provide pre-trained models, using the PyTorch torch.utils.model_zoo. +These can be constructed by passing pretrained=True:

+
import torchvision.models as models
+resnet18 = models.resnet18(pretrained=True)
+alexnet = models.alexnet(pretrained=True)
+squeezenet = models.squeezenet1_0(pretrained=True)
+vgg16 = models.vgg16(pretrained=True)
+densenet = models.densenet161(pretrained=True)
+inception = models.inception_v3(pretrained=True)
+googlenet = models.googlenet(pretrained=True)
+shufflenet = models.shufflenet_v2_x1_0(pretrained=True)
+mobilenet = models.mobilenet_v2(pretrained=True)
+resnext50_32x4d = models.resnext50_32x4d(pretrained=True)
+wide_resnet50_2 = models.wide_resnet50_2(pretrained=True)
+mnasnet = models.mnasnet1_0(pretrained=True)
+
+
+

Instantiating a pre-trained model will download its weights to a cache directory. +This directory can be set using the TORCH_HOME environment variable. See +torch.utils.model_zoo.load_url() for details.

+

Some models use modules which have different training and evaluation +behavior, such as batch normalization. To switch between these modes, use +model.train() or model.eval() as appropriate. See +train() or eval() for details.

+
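For example (a minimal sketch; a random tensor stands in for an already-normalized image batch):

import torch
import torchvision.models as models

model = models.resnet18(pretrained=True)
model.eval()   # switch batch norm / dropout layers to evaluation behavior

x = torch.randn(1, 3, 224, 224)   # stand-in for a normalized image batch
with torch.no_grad():
    logits = model(x)
print(logits.argmax(dim=1))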

All pre-trained models expect input images normalized in the same way, +i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), +where H and W are expected to be at least 224. +The images have to be loaded into a range of [0, 1] and then normalized +using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. +You can use the following transform to normalize:

+
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225])
+
+
+

An example of such normalization can be found in the imagenet example +here

+

The process for obtaining the values of mean and std is roughly equivalent +to:

+
import torch
+from itertools import islice
+from torchvision import datasets, transforms as T
+
+transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
+dataset = datasets.ImageNet(".", split="train", transform=transform)
+
+means = []
+stds = []
+# the exact subset that was originally used is unknown; any sufficiently
+# large subset (here the first 10000 samples) gives a similar estimate
+for img, _ in islice(dataset, 10000):
+    means.append(torch.mean(img))
+    stds.append(torch.std(img))
+
+mean = torch.mean(torch.tensor(means))
+std = torch.mean(torch.tensor(stds))
+
+
+

Unfortunately, the concrete subset that was used is lost. For more +information see this discussion +or these experiments.

+

ImageNet 1-crop error rates (224x224)

+ +++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Network | Top-1 error | Top-5 error
AlexNet | 43.45 | 20.91
VGG-11 | 30.98 | 11.37
VGG-13 | 30.07 | 10.75
VGG-16 | 28.41 | 9.62
VGG-19 | 27.62 | 9.12
VGG-11 with batch normalization | 29.62 | 10.19
VGG-13 with batch normalization | 28.45 | 9.63
VGG-16 with batch normalization | 26.63 | 8.50
VGG-19 with batch normalization | 25.76 | 8.15
ResNet-18 | 30.24 | 10.92
ResNet-34 | 26.70 | 8.58
ResNet-50 | 23.85 | 7.13
ResNet-101 | 22.63 | 6.44
ResNet-152 | 21.69 | 5.94
SqueezeNet 1.0 | 41.90 | 19.58
SqueezeNet 1.1 | 41.81 | 19.38
Densenet-121 | 25.35 | 7.83
Densenet-169 | 24.00 | 7.00
Densenet-201 | 22.80 | 6.43
Densenet-161 | 22.35 | 6.20
Inception v3 | 22.55 | 6.44
GoogleNet | 30.22 | 10.47
ShuffleNet V2 | 30.64 | 11.68
MobileNet V2 | 28.12 | 9.71
ResNeXt-50-32x4d | 22.38 | 6.30
ResNeXt-101-32x8d | 20.69 | 5.47
Wide ResNet-50-2 | 21.49 | 5.91
Wide ResNet-101-2 | 21.16 | 5.72
MNASNet 1.0 | 26.49 | 8.456
+
+

Alexnet

+
+
+torchvision.models.alexnet(pretrained=False, progress=True, **kwargs)[source]
+

AlexNet model architecture from the +“One weird trick…” paper.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

VGG

+
+
+torchvision.models.vgg11(pretrained=False, progress=True, **kwargs)[source]
+

VGG 11-layer model (configuration “A”) from +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg11_bn(pretrained=False, progress=True, **kwargs)[source]
+

VGG 11-layer model (configuration “A”) with batch normalization +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg13(pretrained=False, progress=True, **kwargs)[source]
+

VGG 13-layer model (configuration “B”) +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg13_bn(pretrained=False, progress=True, **kwargs)[source]
+

VGG 13-layer model (configuration “B”) with batch normalization +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg16(pretrained=False, progress=True, **kwargs)[source]
+

VGG 16-layer model (configuration “D”) +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg16_bn(pretrained=False, progress=True, **kwargs)[source]
+

VGG 16-layer model (configuration “D”) with batch normalization +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg19(pretrained=False, progress=True, **kwargs)[source]
+

VGG 19-layer model (configuration “E”) +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.vgg19_bn(pretrained=False, progress=True, **kwargs)[source]
+

VGG 19-layer model (configuration ‘E’) with batch normalization +“Very Deep Convolutional Networks For Large-Scale Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

ResNet

+
+
+torchvision.models.resnet18(pretrained=False, progress=True, **kwargs)[source]
+

ResNet-18 model from +“Deep Residual Learning for Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.resnet34(pretrained=False, progress=True, **kwargs)[source]
+

ResNet-34 model from +“Deep Residual Learning for Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.resnet50(pretrained=False, progress=True, **kwargs)[source]
+

ResNet-50 model from +“Deep Residual Learning for Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.resnet101(pretrained=False, progress=True, **kwargs)[source]
+

ResNet-101 model from +“Deep Residual Learning for Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.resnet152(pretrained=False, progress=True, **kwargs)[source]
+

ResNet-152 model from +“Deep Residual Learning for Image Recognition”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

SqueezeNet

+
+
+torchvision.models.squeezenet1_0(pretrained=False, progress=True, **kwargs)[source]
+

SqueezeNet model architecture from the “SqueezeNet: AlexNet-level +accuracy with 50x fewer parameters and <0.5MB model size” paper.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.squeezenet1_1(pretrained=False, progress=True, **kwargs)[source]
+

SqueezeNet 1.1 model from the official SqueezeNet repo. +SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters +than SqueezeNet 1.0, without sacrificing accuracy.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

DenseNet

+
+
+torchvision.models.densenet121(pretrained=False, progress=True, **kwargs)[source]
+

Densenet-121 model from +“Densely Connected Convolutional Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”
  • +
+
+
+ +
+
+torchvision.models.densenet169(pretrained=False, progress=True, **kwargs)[source]
+

Densenet-169 model from +“Densely Connected Convolutional Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”
  • +
+
+
+ +
+
+torchvision.models.densenet161(pretrained=False, progress=True, **kwargs)[source]
+

Densenet-161 model from +“Densely Connected Convolutional Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”
  • +
+
+
+ +
+
+torchvision.models.densenet201(pretrained=False, progress=True, **kwargs)[source]
+

Densenet-201 model from +“Densely Connected Convolutional Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”
  • +
+
+
+ +
+
+

Inception v3

+
+
+torchvision.models.inception_v3(pretrained=False, progress=True, **kwargs)[source]
+

Inception v3 model architecture from +“Rethinking the Inception Architecture for Computer Vision”.

+
+

Note

+

Important: In contrast to the other models the inception_v3 expects tensors with a size of +N x 3 x 299 x 299, so ensure your images are sized accordingly.

+
+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • aux_logits (bool) – If True, add an auxiliary branch that can improve training. +Default: True
  • +
  • transform_input (bool) – If True, preprocesses the input according to the method with which it +was trained on ImageNet. Default: False
  • +
+
+
+ +
+

Note

+

This requires scipy to be installed

+
+
+
+

GoogLeNet

+
+
+torchvision.models.googlenet(pretrained=False, progress=True, **kwargs)[source]
+

GoogLeNet (Inception v1) model architecture from +“Going Deeper with Convolutions”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • aux_logits (bool) – If True, adds two auxiliary branches that can improve training. +Default: False when pretrained is True otherwise True
  • +
  • transform_input (bool) – If True, preprocesses the input according to the method with which it +was trained on ImageNet. Default: False
  • +
+
+
+ +
+

Note

+

This requires scipy to be installed

+
+
+
+

ShuffleNet v2

+
+
+torchvision.models.shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs)[source]
+

Constructs a ShuffleNetV2 with 0.5x output channels, as described in +“ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs)[source]
+

Constructs a ShuffleNetV2 with 1.0x output channels, as described in +“ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs)[source]
+

Constructs a ShuffleNetV2 with 1.5x output channels, as described in +“ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs)[source]
+

Constructs a ShuffleNetV2 with 2.0x output channels, as described in +“ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

MobileNet v2

+
+
+torchvision.models.mobilenet_v2(pretrained=False, progress=True, **kwargs)[source]
+

Constructs a MobileNetV2 architecture from +“MobileNetV2: Inverted Residuals and Linear Bottlenecks”.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

ResNext

+
+
+torchvision.models.resnext50_32x4d(pretrained=False, progress=True, **kwargs)[source]
+

ResNeXt-50 32x4d model from +“Aggregated Residual Transformation for Deep Neural Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.resnext101_32x8d(pretrained=False, progress=True, **kwargs)[source]
+

ResNeXt-101 32x8d model from +“Aggregated Residual Transformation for Deep Neural Networks”

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

Wide ResNet

+
+
+torchvision.models.wide_resnet50_2(pretrained=False, progress=True, **kwargs)[source]
+

Wide ResNet-50-2 model from +“Wide Residual Networks”

+

The model is the same as ResNet except for the bottleneck number of channels +which is twice larger in every block. The number of channels in outer 1x1 +convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 +channels, and in Wide ResNet-50-2 has 2048-1024-2048.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.wide_resnet101_2(pretrained=False, progress=True, **kwargs)[source]
+

Wide ResNet-101-2 model from +“Wide Residual Networks”

+

The model is the same as ResNet except for the bottleneck number of channels +which is twice larger in every block. The number of channels in outer 1x1 +convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 +channels, and in Wide ResNet-50-2 has 2048-1024-2048.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

MNASNet

+
+
+torchvision.models.mnasnet0_5(pretrained=False, progress=True, **kwargs)[source]
+

MNASNet with depth multiplier of 0.5 from +“MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

Parameters:
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • progress (bool) – If True, displays a progress bar of the download to stderr

+
+ +
+
+torchvision.models.mnasnet0_75(pretrained=False, progress=True, **kwargs)[source]
+

MNASNet with depth multiplier of 0.75 from +“MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

Parameters:
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • progress (bool) – If True, displays a progress bar of the download to stderr

+
+ +
+
+torchvision.models.mnasnet1_0(pretrained=False, progress=True, **kwargs)[source]
+

MNASNet with depth multiplier of 1.0 from +“MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

Parameters:
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • progress (bool) – If True, displays a progress bar of the download to stderr

+
+ +
+
+torchvision.models.mnasnet1_3(pretrained=False, progress=True, **kwargs)[source]
+

MNASNet with depth multiplier of 1.3 from +“MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

Parameters:
  • pretrained (bool) – If True, returns a model pre-trained on ImageNet
  • progress (bool) – If True, displays a progress bar of the download to stderr

+
+ +
+
+
+

Semantic Segmentation

+

The models subpackage contains definitions for the following model +architectures for semantic segmentation:

+ +

As with image classification models, all pre-trained models expect input images normalized in the same way. +The images have to be loaded into a range of [0, 1] and then normalized using +mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. +They have been trained on images resized such that their minimum size is 520.

+

The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are +present in the Pascal VOC dataset. You can see more information on how the subset has been selected in +references/segmentation/coco_utils.py. The classes that the pre-trained model outputs are the following, +in order:

+
+
['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
+ 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
+ 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
+
+
+
+
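A minimal inference sketch (a random tensor stands in for an already-normalized image batch):

import torch
from torchvision import models

model = models.segmentation.fcn_resnet50(pretrained=True).eval()

x = torch.rand(1, 3, 520, 520)    # stand-in for a normalized image batch
with torch.no_grad():
    out = model(x)["out"]         # Tensor[1, 21, H, W] of per-class scores
pred = out.argmax(1)              # per-pixel indices into the class list above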

The accuracies of the pre-trained models evaluated on COCO val2017 are as follows

+ +++++ + + + + + + + + + + + + + + + + + + + + + + + + +
Network | mean IoU | global pixelwise acc
FCN ResNet50 | 60.5 | 91.4
FCN ResNet101 | 63.7 | 91.9
DeepLabV3 ResNet50 | 66.4 | 92.4
DeepLabV3 ResNet101 | 67.4 | 92.4
+
+

Fully Convolutional Networks

+
+
+torchvision.models.segmentation.fcn_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]
+

Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which +contains the same classes as Pascal VOC
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.segmentation.fcn_resnet101(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]
+

Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which +contains the same classes as Pascal VOC
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+

DeepLabV3

+
+
+torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]
+

Constructs a DeepLabV3 model with a ResNet-50 backbone.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which +contains the same classes as Pascal VOC
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]
+

Constructs a DeepLabV3 model with a ResNet-101 backbone.

+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which +contains the same classes as Pascal VOC
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
+
+ +
+
+
+

Object Detection, Instance Segmentation and Person Keypoint Detection

+

The models subpackage contains definitions for the following model +architectures for detection:

+ +

The pre-trained models for detection, instance segmentation and +keypoint detection are initialized with the classification models +in torchvision.

+

The models expect a list of Tensor[C, H, W], in the range 0-1. +The models internally resize the images so that they have a minimum size +of 800. This option can be changed by passing the option min_size +to the constructor of the models.

+

For object detection and instance segmentation, the pre-trained +models return the predictions of the following classes:

+
+
COCO_INSTANCE_CATEGORY_NAMES = [
+    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
+    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
+    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
+    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
+    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
+    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
+    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
+]
+
+
+
+

Here is a summary of the accuracies for the models trained on the instances set of COCO train2017 and evaluated on COCO val2017.

Network                        box AP   mask AP   keypoint AP
Faster R-CNN ResNet-50 FPN     37.0     -         -
RetinaNet ResNet-50 FPN        36.4     -         -
Mask R-CNN ResNet-50 FPN       37.9     34.6      -

For person keypoint detection, the accuracies for the pre-trained +models are as follows

Network                        box AP   mask AP   keypoint AP
Keypoint R-CNN ResNet-50 FPN   54.6     -         65.0

For person keypoint detection, the pre-trained model returns the keypoints in the following order:

+
+
COCO_PERSON_KEYPOINT_NAMES = [
+    'nose',
+    'left_eye',
+    'right_eye',
+    'left_ear',
+    'right_ear',
+    'left_shoulder',
+    'right_shoulder',
+    'left_elbow',
+    'right_elbow',
+    'left_wrist',
+    'right_wrist',
+    'left_hip',
+    'right_hip',
+    'left_knee',
+    'right_knee',
+    'left_ankle',
+    'right_ankle'
+]
+
+
+
+
+

Runtime characteristics

+

The implementations of the models for object detection, instance segmentation +and keypoint detection are efficient.

+

In the following table, we use 8 V100 GPUs, with CUDA 10.0 and CUDNN 7.4 to +report the results. During training, we use a batch size of 2 per GPU, and +during testing a batch size of 1 is used.

+

For test time, we report the time for the model evaluation and postprocessing +(including mask pasting in image), but not the time for computing the +precision-recall.

Network                        train time (s / it)   test time (s / it)   memory (GB)
Faster R-CNN ResNet-50 FPN     0.2288                0.0590               5.2
RetinaNet ResNet-50 FPN        0.2514                0.0939               4.1
Mask R-CNN ResNet-50 FPN       0.2728                0.0903               5.4
Keypoint R-CNN ResNet-50 FPN   0.3789                0.1242               6.8
+
+

Faster R-CNN

+
+
+torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs)[source]
+

Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.

+

The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each +image, and should be in 0-1 range. Different images can have different sizes.

+

The behavior of the model changes depending on whether it is in training or evaluation mode.

During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

+
+
    +
  • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the class label for each ground-truth box
  • +
+
+

The model returns a Dict[Tensor] during training, containing the classification and regression +losses for both the RPN and the R-CNN.

+

During inference, the model requires only the input tensors, and returns the post-processed +predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as +follows:

+
+
    +
  • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the predicted labels for each image
  • +
  • scores (Tensor[N]): the scores of each prediction
  • +
+
+

Faster R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.

+

Example:

+
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+>>> # For training
+>>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
+>>> labels = torch.randint(1, 91, (4, 11))
+>>> images = list(image for image in images)
+>>> targets = []
+>>> for i in range(len(images)):
+>>>     d = {}
+>>>     d['boxes'] = boxes[i]
+>>>     d['labels'] = labels[i]
+>>>     targets.append(d)
+>>> output = model(images, targets)
+>>> # For inference
+>>> model.eval()
+>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+>>> predictions = model(x)
+>>>
+>>> # optionally, if you want to export the model to ONNX:
+>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
+
+
+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • pretrained_backbone (bool) – If True, returns a model with backbone pre-trained on Imagenet
  • +
  • num_classes (int) – number of output classes of the model (including the background)
  • +
  • trainable_backbone_layers (int) – number of trainable (not frozen) resnet layers starting from final block. +Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
  • +
+
+
+ +
+
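A common use of num_classes and trainable_backbone_layers is fine-tuning on a custom dataset. The sketch below follows the usual torchvision fine-tuning recipe and assumes the FastRCNNPredictor helper in torchvision.models.detection.faster_rcnn; it keeps the COCO weights and only replaces the box head:

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

num_classes = 3  # e.g. 2 object classes + background
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    pretrained=True, trainable_backbone_layers=3
)

# Replace the pre-trained (91-class) box predictor with a fresh one.
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)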
+

RetinaNet

+
+
+

Mask R-CNN

+
+
+torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs)[source]
+

Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.

+

The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each +image, and should be in 0-1 range. Different images can have different sizes.

+

The behavior of the model changes depending on whether it is in training or evaluation mode.

During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

+
+
    +
  • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the class label for each ground-truth box
  • +
  • masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance
  • +
+
+

The model returns a Dict[Tensor] during training, containing the classification and regression +losses for both the RPN and the R-CNN, and the mask loss.

+

During inference, the model requires only the input tensors, and returns the post-processed +predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as +follows:

+
+
    +
  • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the predicted labels for each image
  • +
  • scores (Tensor[N]): the scores of each prediction
  • +
  • masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to +obtain the final segmentation masks, the soft masks can be thresholded, generally +with a value of 0.5 (mask >= 0.5)
  • +
+
+

Mask R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.

+

Example:

+
>>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
+>>> model.eval()
+>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+>>> predictions = model(x)
+>>>
+>>> # optionally, if you want to export the model to ONNX:
+>>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)
+
+
+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • pretrained_backbone (bool) – If True, returns a model with backbone pre-trained on Imagenet
  • +
  • num_classes (int) – number of output classes of the model (including the background)
  • +
  • trainable_backbone_layers (int) – number of trainable (not frozen) resnet layers starting from final block. +Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
  • +
+
+
+ +
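As suggested above, the soft masks returned at inference time are usually binarized with a 0.5 threshold. A minimal sketch (not part of the reference entry):

import torch
import torchvision

model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).eval()
with torch.no_grad():
    pred = model([torch.rand(3, 300, 400)])[0]

keep = pred["scores"] > 0.7               # optionally drop low-confidence detections
binary_masks = pred["masks"][keep] > 0.5  # boolean masks, shape [M, 1, H, W]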
+
+

Keypoint R-CNN

+
+
+torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=2, num_keypoints=17, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs)[source]
+

Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.

+

The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each +image, and should be in 0-1 range. Different images can have different sizes.

+

The behavior of the model changes depending on whether it is in training or evaluation mode.

During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

+
+
    +
  • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the class label for each ground-truth box
  • +
  • keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the +format [x, y, visibility], where visibility=0 means that the keypoint is not visible.
  • +
+
+

The model returns a Dict[Tensor] during training, containing the classification and regression +losses for both the RPN and the R-CNN, and the keypoint loss.

+

During inference, the model requires only the input tensors, and returns the post-processed +predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as +follows:

+
+
    +
  • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x +between 0 and W and values of y between 0 and H
  • +
  • labels (Int64Tensor[N]): the predicted labels for each image
  • +
  • scores (Tensor[N]): the scores of each prediction
  • +
  • keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
  • +
+
+

Keypoint R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.

+

Example:

+
>>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
+>>> model.eval()
+>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+>>> predictions = model(x)
+>>>
+>>> # optionally, if you want to export the model to ONNX:
+>>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)
+
+
+ +++ + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
  • pretrained_backbone (bool) – If True, returns a model with backbone pre-trained on Imagenet
  • +
  • num_classes (int) – number of output classes of the model (including the background)
  • +
  • trainable_backbone_layers (int) – number of trainable (not frozen) resnet layers starting from final block. +Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
  • +
+
+
+ +
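To interpret the [N, K, 3] keypoints tensor, each of the K = 17 rows can be paired with the corresponding entry of COCO_PERSON_KEYPOINT_NAMES shown earlier. A minimal sketch (the name list is repeated here only to keep the snippet self-contained):

import torch
import torchvision

COCO_PERSON_KEYPOINT_NAMES = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle',
]

model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True).eval()
with torch.no_grad():
    pred = model([torch.rand(3, 300, 400)])[0]

for person in pred["keypoints"]:                  # one [K, 3] tensor per detection
    for name, (x, y, v) in zip(COCO_PERSON_KEYPOINT_NAMES, person.tolist()):
        print(f"{name}: ({x:.1f}, {y:.1f}), visibility={v:.0f}")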
+
+
+

Video classification

+

We provide models for action recognition pre-trained on Kinetics-400. +They have all been trained with the scripts provided in references/video_classification.

+

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W), where H and W are expected to be 112, and T is the number of video frames in a clip. The images have to be loaded into the range [0, 1] and then normalized using mean = [0.43216, 0.394666, 0.37645] and std = [0.22803, 0.22145, 0.216989].

+
+

Note

+

The normalization parameters are different from the image classification ones, and correspond +to the mean and std from Kinetics-400.

+
+
+

Note

+

For now, normalization code can be found in references/video_classification/transforms.py, +see the Normalize function there. Note that it differs from standard normalization for +images because it assumes the video is 4d.

+
+

Kinetics 1-crop accuracies for clip length 16 (16x112x112)

Network          Clip acc@1   Clip acc@5
ResNet 3D 18     52.75        75.45
ResNet MC 18     53.90        76.29
ResNet (2+1)D    57.50        78.81
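Putting the pieces above together, a minimal inference sketch (the random clip stands in for 16 real RGB frames in [0, 1]; the pre-trained model returns 400 Kinetics class scores):

import torch
import torchvision

mean = torch.tensor([0.43216, 0.394666, 0.37645]).view(3, 1, 1, 1)
std = torch.tensor([0.22803, 0.22145, 0.216989]).view(3, 1, 1, 1)

clip = torch.rand(3, 16, 112, 112)          # (C, T, H, W) clip in [0, 1]
clip = (clip - mean) / std                  # Kinetics-400 normalization from above

model = torchvision.models.video.r3d_18(pretrained=True).eval()
with torch.no_grad():
    scores = model(clip.unsqueeze(0))       # (1, 400) class scores
print(scores.argmax(1))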
+

ResNet 3D

+
+
+torchvision.models.video.r3d_18(pretrained=False, progress=True, **kwargs)[source]
+

Construct 18 layer Resnet3D model as in +https://arxiv.org/abs/1711.11248

+ +++ + + + + + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on Kinetics-400
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
Returns:

R3D-18 network

+
Return type:

nn.Module

+
+
+ +
+
+

ResNet Mixed Convolution

+
+
+torchvision.models.video.mc3_18(pretrained=False, progress=True, **kwargs)[source]
+

Constructor for 18 layer Mixed Convolution network as in +https://arxiv.org/abs/1711.11248

+ +++ + + + + + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on Kinetics-400
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
Returns:

MC3 Network definition

+
Return type:

nn.Module

+
+
+ +
+
+

ResNet (2+1)D

+
+
+torchvision.models.video.r2plus1d_18(pretrained=False, progress=True, **kwargs)[source]
+

Constructor for the 18 layer deep R(2+1)D network as in +https://arxiv.org/abs/1711.11248

+ +++ + + + + + + + +
Parameters:
    +
  • pretrained (bool) – If True, returns a model pre-trained on Kinetics-400
  • +
  • progress (bool) – If True, displays a progress bar of the download to stderr
  • +
+
Returns:

R(2+1)D-18 network

+
Return type:

nn.Module

+
+
+ +
+
+
diff --git a/docs/1.7.0/torchvision/objects.inv b/docs/1.7.0/torchvision/objects.inv
new file mode 100644
index 000000000000..067b761010e2
Binary files /dev/null and b/docs/1.7.0/torchvision/objects.inv differ
diff --git a/docs/1.7.0/torchvision/ops.html b/docs/1.7.0/torchvision/ops.html
new file mode 100644
index 000000000000..0e3ac060f339
--- /dev/null
+++ b/docs/1.7.0/torchvision/ops.html
@@ -0,0 +1,1082 @@

torchvision.ops — Torchvision 0.8.0.dev20201012 documentation

torchvision.ops

+

torchvision.ops implements operators that are specific to computer vision.

+
+

Note

+

All operators have native support for TorchScript.

+
+
+
+torchvision.ops.nms(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float) → torch.Tensor[source]
+

Performs non-maximum suppression (NMS) on the boxes according +to their intersection-over-union (IoU).

+

NMS iteratively removes lower scoring boxes which have an +IoU greater than iou_threshold with another (higher scoring) +box.

+

If multiple boxes have the exact same score and satisfy the IoU +criterion with respect to a reference box, the selected box is +not guaranteed to be the same between CPU and GPU. This is similar +to the behavior of argsort in PyTorch when repeated values are present.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes (Tensor[N, 4]) – boxes to perform NMS on. They are expected to be in (x1, y1, x2, y2) format
  • +
  • scores (Tensor[N]) – scores for each one of the boxes
  • +
  • iou_threshold (float) – discards all overlapping +boxes with IoU > iou_threshold
  • +
+
Returns:

keep – int64 tensor with the indices +of the elements that have been kept +by NMS, sorted in decreasing order of scores

+
Return type:

Tensor

+
+
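A small worked example (not from the original entry): the second box overlaps the first with IoU ≈ 0.68 and is suppressed, while the disjoint third box is kept:

import torch
from torchvision.ops import nms

boxes = torch.tensor([[0., 0., 100., 100.],
                      [10., 10., 110., 110.],
                      [200., 200., 300., 300.]])
scores = torch.tensor([0.9, 0.8, 0.7])

keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)   # tensor([0, 2])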
+ +
+
+torchvision.ops.batched_nms(boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float) → torch.Tensor[source]
+

Performs non-maximum suppression in a batched fashion.

+

Each index value corresponds to a category, and NMS will not be applied between elements of different categories.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes (Tensor[N, 4]) – boxes where NMS will be performed. They +are expected to be in (x1, y1, x2, y2) format
  • +
  • scores (Tensor[N]) – scores for each one of the boxes
  • +
  • idxs (Tensor[N]) – indices of the categories for each one of the boxes.
  • +
  • iou_threshold (float) – discards all overlapping boxes +with IoU > iou_threshold
  • +
+
Returns:

keep – int64 tensor with the indices of +the elements that have been kept by NMS, sorted +in decreasing order of scores

+
Return type:

Tensor

+
+
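Contrast this with nms above: in the sketch below both overlapping boxes survive because they are assigned different category indices:

import torch
from torchvision.ops import batched_nms

boxes = torch.tensor([[0., 0., 100., 100.],
                      [10., 10., 110., 110.]])
scores = torch.tensor([0.9, 0.8])
idxs = torch.tensor([0, 1])   # a different category per box

keep = batched_nms(boxes, scores, idxs, iou_threshold=0.5)
print(keep)   # tensor([0, 1])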
+ +
+
+torchvision.ops.remove_small_boxes(boxes: torch.Tensor, min_size: float) → torch.Tensor[source]
+

Remove boxes which contain at least one side smaller than min_size.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes (Tensor[N, 4]) – boxes in (x1, y1, x2, y2) format
  • +
  • min_size (float) – minimum size
  • +
+
Returns:

indices of the boxes that have both sides larger than min_size

Return type:

keep (Tensor[K])

+
+
+ +
+
+torchvision.ops.clip_boxes_to_image(boxes: torch.Tensor, size: Tuple[int, int]) → torch.Tensor[source]
+

Clip boxes so that they lie inside an image of size size.

+ +++ + + + + + +
Parameters:
    +
  • boxes (Tensor[N, 4]) – boxes in (x1, y1, x2, y2) format
  • +
  • size (Tuple[height, width]) – size of the image
  • +
+
Returns:

clipped_boxes (Tensor[N, 4])

+
+
+ +
+
+torchvision.ops.box_convert(boxes: torch.Tensor, in_fmt: str, out_fmt: str) → torch.Tensor[source]
+

Converts boxes from given in_fmt to out_fmt. +Supported in_fmt and out_fmt are:

+

‘xyxy’: boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.

+

‘xywh’ : boxes are represented via corner, width and height, x1, y1 being top left, w, h being width and height.

+

‘cxcywh’ : boxes are represented via centre, width and height, cx, cy being center of box, w, h +being width and height.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes (Tensor[N, 4]) – boxes which will be converted.
  • +
  • in_fmt (str) – Input format of given boxes. Supported formats are [‘xyxy’, ‘xywh’, ‘cxcywh’].
  • +
  • out_fmt (str) – Output format of given boxes. Supported formats are [‘xyxy’, ‘xywh’, ‘cxcywh’]
  • +
+
Returns:

Boxes into converted format.

+
Return type:

boxes (Tensor[N, 4])

+
+
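A quick worked example of the supported formats (hand-checked values, not from the original entry):

import torch
from torchvision.ops import box_convert

xyxy = torch.tensor([[10., 20., 50., 80.]])

print(box_convert(xyxy, in_fmt="xyxy", out_fmt="xywh"))    # tensor([[10., 20., 40., 60.]])
print(box_convert(xyxy, in_fmt="xyxy", out_fmt="cxcywh"))  # tensor([[30., 50., 40., 60.]])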
+ +
+
+torchvision.ops.box_area(boxes: torch.Tensor) → torch.Tensor[source]
+

Computes the area of a set of bounding boxes, which are specified by its +(x1, y1, x2, y2) coordinates.

+ +++ + + + + + + + +
Parameters:boxes (Tensor[N, 4]) – boxes for which the area will be computed. They +are expected to be in (x1, y1, x2, y2) format
Returns:area for each box
Return type:area (Tensor[N])
+
+ +
+
+torchvision.ops.box_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) → torch.Tensor[source]
+

Return intersection-over-union (Jaccard index) of boxes.

+

Both sets of boxes are expected to be in (x1, y1, x2, y2) format.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes1 (Tensor[N, 4]) –
  • +
  • boxes2 (Tensor[M, 4]) –
  • +
+
Returns:

the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2

+
Return type:

iou (Tensor[N, M])

+
+
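For example (a minimal sketch): a box compared against itself and against a half-overlapping box:

import torch
from torchvision.ops import box_iou

a = torch.tensor([[0., 0., 10., 10.]])
b = torch.tensor([[0., 0., 10., 10.],
                  [5., 0., 15., 10.]])

print(box_iou(a, b))   # tensor([[1.0000, 0.3333]])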
+ +
+
+torchvision.ops.generalized_box_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) → torch.Tensor[source]
+

Return generalized intersection-over-union (Jaccard index) of boxes.

+

Both sets of boxes are expected to be in (x1, y1, x2, y2) format.

+ +++ + + + + + + + +
Parameters:
    +
  • boxes1 (Tensor[N, 4]) –
  • +
  • boxes2 (Tensor[M, 4]) –
  • +
+
Returns:

the NxM matrix containing the pairwise generalized_IoU values +for every element in boxes1 and boxes2

+
Return type:

generalized_iou (Tensor[N, M])

+
+
+ +
+
+torchvision.ops.roi_align(input: torch.Tensor, boxes: torch.Tensor, output_size: None, spatial_scale: float = 1.0, sampling_ratio: int = -1, aligned: bool = False) → torch.Tensor[source]
+

Performs Region of Interest (RoI) Align operator described in Mask R-CNN

+ +++ + + + + + +
Parameters:
    +
  • input (Tensor[N, C, H, W]) – input tensor
  • +
  • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch
  • +
  • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)
  • +
  • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0
  • +
  • sampling_ratio (int) – number of sampling points in the interpolation grid +used to compute the output value of each pooled output bin. If > 0, +then exactly sampling_ratio x sampling_ratio grid points are used. If +<= 0, then an adaptive number of grid points are used (computed as +ceil(roi_width / pooled_w), and likewise for height). Default: -1
  • +
  • aligned (bool) – If False, use the legacy implementation. If True, shift the box coordinates by -0.5 pixel so that they align more closely with the two neighboring pixel indices. This is the version used in Detectron2
  • +
+
Returns:

output (Tensor[K, C, output_size[0], output_size[1]])

+
+
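A minimal sketch of calling roi_align with a [K, 5] box tensor whose first column is the batch index; the spatial_scale of 1/8 assumes the feature map is 8x smaller than the image the boxes were drawn on:

import torch
from torchvision.ops import roi_align

features = torch.rand(1, 256, 50, 50)               # feature map, stride 8 w.r.t. the image
rois = torch.tensor([[0., 16., 16., 200., 200.]])   # [batch_index, x1, y1, x2, y2] in image coords

pooled = roi_align(features, rois, output_size=(7, 7),
                   spatial_scale=1.0 / 8, sampling_ratio=2, aligned=True)
print(pooled.shape)   # torch.Size([1, 256, 7, 7])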
+ +
+
+torchvision.ops.ps_roi_align(input: torch.Tensor, boxes: torch.Tensor, output_size: int, spatial_scale: float = 1.0, sampling_ratio: int = -1) → torch.Tensor[source]
+

Performs Position-Sensitive Region of Interest (RoI) Align operator +mentioned in Light-Head R-CNN.

+ +++ + + + + + +
Parameters:
    +
  • input (Tensor[N, C, H, W]) – input tensor
  • +
  • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch
  • +
  • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)
  • +
  • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0
  • +
  • sampling_ratio (int) – number of sampling points in the interpolation grid +used to compute the output value of each pooled output bin. If > 0 +then exactly sampling_ratio x sampling_ratio grid points are used. +If <= 0, then an adaptive number of grid points are used (computed as +ceil(roi_width / pooled_w), and likewise for height). Default: -1
  • +
+
Returns:

output (Tensor[K, C, output_size[0], output_size[1]])

+
+
+ +
+
+torchvision.ops.roi_pool(input: torch.Tensor, boxes: torch.Tensor, output_size: None, spatial_scale: float = 1.0) → torch.Tensor[source]
+

Performs Region of Interest (RoI) Pool operator described in Fast R-CNN

+ +++ + + + + + +
Parameters:
    +
  • input (Tensor[N, C, H, W]) – input tensor
  • +
  • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch
  • +
  • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)
  • +
  • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0
  • +
+
Returns:

output (Tensor[K, C, output_size[0], output_size[1]])

+
+
+ +
+
+torchvision.ops.ps_roi_pool(input: torch.Tensor, boxes: torch.Tensor, output_size: int, spatial_scale: float = 1.0) → torch.Tensor[source]
+

Performs Position-Sensitive Region of Interest (RoI) Pool operator +described in R-FCN

+ +++ + + + + + +
Parameters:
    +
  • input (Tensor[N, C, H, W]) – input tensor
  • +
  • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch
  • +
  • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)
  • +
  • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0
  • +
+
Returns:

output (Tensor[K, C, output_size[0], output_size[1]])

+
+
+ +
+
+torchvision.ops.deform_conv2d(input: torch.Tensor, offset: torch.Tensor, weight: torch.Tensor, bias: Union[torch.Tensor, NoneType] = None, stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1)) → torch.Tensor[source]
+

Performs Deformable Convolution, described in Deformable Convolutional Networks

+ +++ + + + + + + + +
Parameters:
    +
  • input (Tensor[batch_size, in_channels, in_height, in_width]) – input tensor
  • +
  • offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, out_height, out_width]) – offsets to be applied for each position in the convolution kernel.
  • +
  • weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]) – convolution weights, split into groups of size (in_channels // groups)
  • +
  • bias (Tensor[out_channels]) – optional bias of shape (out_channels,). Default: None
  • +
  • stride (int or Tuple[int, int]) – distance between convolution centers. Default: 1
  • +
  • padding (int or Tuple[int, int]) – height/width of padding of zeroes around +each image. Default: 0
  • +
  • dilation (int or Tuple[int, int]) – the spacing between kernel elements. Default: 1
  • +
+
Returns:

result of convolution

+
Return type:

output (Tensor[batch_sz, out_channels, out_h, out_w])

+
+
+
Examples::
+
>>> input = torch.rand(4, 3, 10, 10)
+>>> kh, kw = 3, 3
+>>> weight = torch.rand(5, 3, kh, kw)
+>>> # offset should have the same spatial size as the output
+>>> # of the convolution. In this case, for an input of 10, stride of 1
+>>> # and kernel size of 3, without padding, the output size is 8
+>>> offset = torch.rand(4, 2 * kh * kw, 8, 8)
+>>> out = deform_conv2d(input, offset, weight)
+>>> print(out.shape)
+>>> # returns
+>>>  torch.Size([4, 5, 8, 8])
+
+
+
+
+
+ +
+
+class torchvision.ops.RoIAlign(output_size: None, spatial_scale: float, sampling_ratio: int, aligned: bool = False)[source]
+

See roi_align

+
+ +
+
+class torchvision.ops.PSRoIAlign(output_size: int, spatial_scale: float, sampling_ratio: int)[source]
+

See ps_roi_align

+
+ +
+
+class torchvision.ops.RoIPool(output_size: None, spatial_scale: float)[source]
+

See roi_pool

+
+ +
+
+class torchvision.ops.PSRoIPool(output_size: int, spatial_scale: float)[source]
+

See ps_roi_pool

+
+ +
+
+class torchvision.ops.DeformConv2d(in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: int = 0, dilation: int = 1, groups: int = 1, bias: bool = True)[source]
+

See deform_conv2d

+
+ +
+
+class torchvision.ops.MultiScaleRoIAlign(featmap_names: List[str], output_size: Union[int, Tuple[int], List[int]], sampling_ratio: int)[source]
+

Multi-scale RoIAlign pooling, which is useful for detection with or without FPN.

+

It infers the scale of the pooling via the heuristics present in the FPN paper.

+ +++ + + + +
Parameters:
    +
  • featmap_names (List[str]) – the names of the feature maps that will be used +for the pooling.
  • +
  • output_size (List[Tuple[int, int]] or List[int]) – output size for the pooled region
  • +
  • sampling_ratio (int) – sampling ratio for ROIAlign
  • +
+
+

Examples:

+
>>> m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat3'], 3, 2)
+>>> i = OrderedDict()
+>>> i['feat1'] = torch.rand(1, 5, 64, 64)
+>>> i['feat2'] = torch.rand(1, 5, 32, 32)  # this feature won't be used in the pooling
+>>> i['feat3'] = torch.rand(1, 5, 16, 16)
+>>> # create some random bounding boxes
+>>> boxes = torch.rand(6, 4) * 256; boxes[:, 2:] += boxes[:, :2]
+>>> # original image size, before computing the feature maps
+>>> image_sizes = [(512, 512)]
+>>> output = m(i, [boxes], image_sizes)
+>>> print(output.shape)
+>>> torch.Size([6, 5, 3, 3])
+
+
+
+ +
+
+class torchvision.ops.FeaturePyramidNetwork(in_channels_list: List[int], out_channels: int, extra_blocks: Union[torchvision.ops.feature_pyramid_network.ExtraFPNBlock, NoneType] = None)[source]
+

Module that adds an FPN on top of a set of feature maps. This is based on “Feature Pyramid Network for Object Detection”.

+

The feature maps are currently supposed to be in increasing depth +order.

+

The input to the model is expected to be an OrderedDict[Tensor], containing +the feature maps on top of which the FPN will be added.

+ +++ + + + +
Parameters:
    +
  • in_channels_list (list[int]) – number of channels for each feature map that +is passed to the module
  • +
  • out_channels (int) – number of channels of the FPN representation
  • +
  • extra_blocks (ExtraFPNBlock or None) – if provided, extra operations will +be performed. It is expected to take the fpn features, the original +features and the names of the original features as input, and returns +a new list of feature maps and their corresponding names
  • +
+
+

Examples:

+
>>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)
+>>> # get some dummy data
+>>> x = OrderedDict()
+>>> x['feat0'] = torch.rand(1, 10, 64, 64)
+>>> x['feat2'] = torch.rand(1, 20, 16, 16)
+>>> x['feat3'] = torch.rand(1, 30, 8, 8)
+>>> # compute the FPN on top of x
+>>> output = m(x)
+>>> print([(k, v.shape) for k, v in output.items()])
+>>> # returns
+>>>   [('feat0', torch.Size([1, 5, 64, 64])),
+>>>    ('feat2', torch.Size([1, 5, 16, 16])),
+>>>    ('feat3', torch.Size([1, 5, 8, 8]))]
+
+
+
+ +
+ + +
+ +
+ + +
+
+ +
+
+
+ + +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/py-modindex.html b/docs/1.7.0/torchvision/py-modindex.html new file mode 100644 index 000000000000..2df8b814936a --- /dev/null +++ b/docs/1.7.0/torchvision/py-modindex.html @@ -0,0 +1,573 @@ + + + + + + + + + + + + Python Module Index — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+ + +
+
+ + + + +
+ + + +
+

+ © Copyright 2017, Torch Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/search.html b/docs/1.7.0/torchvision/search.html new file mode 100644 index 000000000000..71167903c10d --- /dev/null +++ b/docs/1.7.0/torchvision/search.html @@ -0,0 +1,565 @@ + + + + + + + + + + + + Search — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ + + + +
+ +
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2017, Torch Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/docs/1.7.0/torchvision/searchindex.js b/docs/1.7.0/torchvision/searchindex.js new file mode 100644 index 000000000000..dacf0cce51af --- /dev/null +++ b/docs/1.7.0/torchvision/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({docnames:["datasets","index","io","models","ops","transforms","utils"],envversion:53,filenames:["datasets.rst","index.rst","io.rst","models.rst","ops.rst","transforms.rst","utils.rst"],objects:{"":{torchvision:[1,0,0,"-"]},"torchvision.datasets":{CIFAR100:[0,1,1,""],CIFAR10:[0,1,1,""],CelebA:[0,1,1,""],Cityscapes:[0,1,1,""],CocoCaptions:[0,1,1,""],CocoDetection:[0,1,1,""],DatasetFolder:[0,1,1,""],EMNIST:[0,1,1,""],FakeData:[0,1,1,""],FashionMNIST:[0,1,1,""],Flickr30k:[0,1,1,""],Flickr8k:[0,1,1,""],HMDB51:[0,1,1,""],ImageFolder:[0,1,1,""],ImageNet:[0,1,1,""],KMNIST:[0,1,1,""],Kinetics400:[0,1,1,""],LSUN:[0,1,1,""],MNIST:[0,1,1,""],Omniglot:[0,1,1,""],PhotoTour:[0,1,1,""],Places365:[0,1,1,""],QMNIST:[0,1,1,""],SBDataset:[0,1,1,""],SBU:[0,1,1,""],STL10:[0,1,1,""],SVHN:[0,1,1,""],UCF101:[0,1,1,""],USPS:[0,1,1,""],VOCDetection:[0,1,1,""],VOCSegmentation:[0,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[0,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[0,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[0,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[0,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[0,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[0,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[0,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[0,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[0,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[0,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[0,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[0,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[0,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[0,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[0,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[0,2,1,""]},"torchvision.io":{decode_image:[2,3,1,""],encode_jpeg:[2,3,1,""],encode_png:[2,3,1,""],read_image:[2,3,1,""],read_video:[2,3,1,""],read_video_timestamps:[2,3,1,""],write_jpeg:[2,3,1,""],write_png:[2,3,1,""],write_video:[2,3,1,""]},"torchvision.models":{alexnet:[3,3,1,""],densenet121:[3,3,1,""],densenet161:[3,3,1,""],densenet169:[3,3,1,""],densenet201:[3,3,1,""],googlenet:[3,3,1,""],inception_v3:[3,3,1,""],mnasnet0_5:[3,3,1,""],mnasnet0_75:[3,3,1,""],mnasnet1_0:[3,3,1,""],mnasnet1_3:[3,3,1,""],mobilenet_v2:[3,3,1,""],resnet101:[3,3,1,""],resnet152:[3,3,1,""],resnet18:[3,3,1,""],resnet34:[3,3,1,""],resnet50:[3,3,1,""],resnext101_32x8d:[3,3,1,""],resnext50_32x4d:[3,3,1,""],shufflenet_v2_x0_5:[3,3,1,""],shufflenet_v2_x1_0:[3,3,1,""],shufflenet_v2_x1_5:[3,3,1,""],shufflenet_v2_x2_0:[3,3,1,""],squeezenet1_0:[3,3,1,""],squeezenet1_1:[3,3,1,""],vgg11:[3,3,1,""],vgg11_bn:[3,3,1,""],vgg13:[3,3,1,""],vgg13_bn:[3,3,1,""],vgg16:[3,3,1,""],vgg16_bn:[3,3,1,""],vgg19:[3,3,1,""],vgg19_bn:[3,3,1,""],wide_resnet101_2:[3,3,1,""],wide_resnet50_2:[3,3,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[3,3,1,""],keypointrcnn_resnet50_fpn:[3,3,1,""],maskrcnn_resnet50_fpn:[3,3,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[3,3,1,""],deeplabv3_resnet50:[3,3,1,""],fcn_resnet101:[3,3,1,""],fcn_resnet50:[3,3,1,""]},"torchvision.models.video":{mc3_18:[3,3,1,""],r2plus1d_18:[3,3,1,""],r3d_18:[3,3,1,""]},"torchvision.ops":{DeformConv2d:[4,1,1,""],FeaturePyram
idNetwork:[4,1,1,""],MultiScaleRoIAlign:[4,1,1,""],PSRoIAlign:[4,1,1,""],PSRoIPool:[4,1,1,""],RoIAlign:[4,1,1,""],RoIPool:[4,1,1,""],batched_nms:[4,3,1,""],box_area:[4,3,1,""],box_convert:[4,3,1,""],box_iou:[4,3,1,""],clip_boxes_to_image:[4,3,1,""],deform_conv2d:[4,3,1,""],generalized_box_iou:[4,3,1,""],nms:[4,3,1,""],ps_roi_align:[4,3,1,""],ps_roi_pool:[4,3,1,""],remove_small_boxes:[4,3,1,""],roi_align:[4,3,1,""],roi_pool:[4,3,1,""]},"torchvision.transforms":{CenterCrop:[5,1,1,""],ColorJitter:[5,1,1,""],Compose:[5,1,1,""],ConvertImageDtype:[5,1,1,""],FiveCrop:[5,1,1,""],GaussianBlur:[5,1,1,""],Grayscale:[5,1,1,""],Lambda:[5,1,1,""],LinearTransformation:[5,1,1,""],Normalize:[5,1,1,""],Pad:[5,1,1,""],RandomAffine:[5,1,1,""],RandomApply:[5,1,1,""],RandomChoice:[5,1,1,""],RandomCrop:[5,1,1,""],RandomErasing:[5,1,1,""],RandomGrayscale:[5,1,1,""],RandomHorizontalFlip:[5,1,1,""],RandomOrder:[5,1,1,""],RandomPerspective:[5,1,1,""],RandomResizedCrop:[5,1,1,""],RandomRotation:[5,1,1,""],RandomSizedCrop:[5,1,1,""],RandomVerticalFlip:[5,1,1,""],Resize:[5,1,1,""],Scale:[5,1,1,""],TenCrop:[5,1,1,""],ToPILImage:[5,1,1,""],ToTensor:[5,1,1,""],functional:[5,0,0,"-"]},"torchvision.transforms.CenterCrop":{forward:[5,2,1,""]},"torchvision.transforms.ColorJitter":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.FiveCrop":{forward:[5,2,1,""]},"torchvision.transforms.GaussianBlur":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.Grayscale":{forward:[5,2,1,""]},"torchvision.transforms.LinearTransformation":{forward:[5,2,1,""]},"torchvision.transforms.Normalize":{forward:[5,2,1,""]},"torchvision.transforms.Pad":{forward:[5,2,1,""]},"torchvision.transforms.RandomAffine":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomCrop":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomErasing":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomGrayscale":{forward:[5,2,1,""]},"torchvision.transforms.RandomHorizontalFlip":{forward:[5,2,1,""]},"torchvision.transforms.RandomPerspective":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomResizedCrop":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomRotation":{forward:[5,2,1,""],get_params:[5,4,1,""]},"torchvision.transforms.RandomVerticalFlip":{forward:[5,2,1,""]},"torchvision.transforms.Resize":{forward:[5,2,1,""]},"torchvision.transforms.TenCrop":{forward:[5,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[5,3,1,""],adjust_contrast:[5,3,1,""],adjust_gamma:[5,3,1,""],adjust_hue:[5,3,1,""],adjust_saturation:[5,3,1,""],affine:[5,3,1,""],center_crop:[5,3,1,""],convert_image_dtype:[5,3,1,""],crop:[5,3,1,""],erase:[5,3,1,""],five_crop:[5,3,1,""],gaussian_blur:[5,3,1,""],hflip:[5,3,1,""],normalize:[5,3,1,""],pad:[5,3,1,""],perspective:[5,3,1,""],pil_to_tensor:[5,3,1,""],resize:[5,3,1,""],resized_crop:[5,3,1,""],rgb_to_grayscale:[5,3,1,""],rotate:[5,3,1,""],ten_crop:[5,3,1,""],to_grayscale:[5,3,1,""],to_pil_image:[5,3,1,""],to_tensor:[5,3,1,""],vflip:[5,3,1,""]},"torchvision.utils":{make_grid:[6,3,1,""],save_image:[6,3,1,""]},torchvision:{get_image_backend:[1,3,1,""],set_image_backend:[1,3,1,""],set_video_backend:[1,3,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"],"4":["py","staticmethod","Python static 
method"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function","4":"py:staticmethod"},terms:{"10k":0,"16x112x112":3,"1x1":3,"224x224":3,"32x4d":3,"32x8d":3,"3rd":1,"427l":0,"4th":0,"50k":0,"50x":3,"5mb":3,"60k":0,"640l":0,"boolean":[0,5],"break":[1,5],"byte":2,"case":[0,2,4,5],"class":[0,3,4,5],"default":[0,2,3,4,5,6],"enum":5,"export":3,"final":[3,5,6],"float":[2,4,5,6],"function":[0,1,2,3],"import":[0,2,3,5],"int":[0,2,3,4,5,6],"long":1,"new":4,"return":[0,2,3,4,5],"short":5,"static":5,"switch":3,"true":[0,3,4,5,6],"try":5,"while":5,For:[0,1,3,5],Going:3,NMS:4,One:[0,3],That:5,The:[0,1,2,3,4,5,6],Then:5,These:[0,1,3,5],Use:5,Will:5,__background__:3,__call__:5,__getitem__:0,__init__:5,__len__:0,_audio_channel:0,_audio_sampl:0,_precomputed_metadata:0,_video_height:0,_video_min_dimens:0,_video_width:0,about:[2,4,5],abov:[0,5],abs:[3,5],absolut:5,acc:3,accept:5,accimag:1,accord:[0,3,4],accordingli:[0,3,5],accuraci:3,across:[0,1],act:5,action:[0,3],adapt:4,add:[3,4],added:4,addit:2,addition:5,address:3,adjust:5,adjust_bright:5,adjust_contrast:5,adjust_gamma:5,adjust_hu:5,adjust_satur:5,aeroplan:3,affin:5,afram:2,after:[4,5],again:0,against:5,aggreg:3,ahead:1,airplan:3,align:4,all:[0,2,3,4,5,6],alloc:2,allow:0,almost:0,alphabet:0,alreadi:0,also:[0,1,5],although:1,alwai:[5,6],amount:[5,6],angl:5,ani:[0,2,5],ann_fil:0,annfil:0,annot:0,annotation_path:0,anoth:4,api:[0,1],append:3,appl:3,appli:[4,5],applic:5,appropri:[2,3],arbitrari:5,architectur:[1,3],archiv:0,area:[4,5],arg:[0,5],argsort:4,argument:[0,2,5,6],around:[4,5],arrai:0,arrang:0,arxiv:[3,5],asd932_:0,aspect:5,assign:0,assum:[3,5],assumpt:5,attr:0,attribut:[0,2],audio:[0,2],audio_fp:2,augment:5,author:0,aux_logit:3,aux_loss:3,auxiliari:3,avail:[0,1,5],avg:5,avi:0,avoid:5,awar:3,axi:5,back:5,backbon:3,backend:1,background:[0,3],backpack:3,backward:[1,5],balanc:0,ball:3,banana:3,band:5,bar:3,base:[0,1,4,5],basebal:3,bat:3,batch:[0,3,4,5,6],batch_siz:[0,4],batch_sz:4,batched_nm:4,bbox:0,bear:3,becaus:[1,3,5],bed:3,bedroom_train:0,been:[3,4],befor:4,behavior:[3,4,5],behind:[0,1],being:[4,5],belong:5,below:5,bench:3,beta:1,between:[0,2,3,4,5],bia:4,bicub:5,bicycl:3,bilinear:5,bin:4,binari:[0,1,3],binaryio:6,bind:1,bird:3,bl_flip:5,black:5,block:3,blue:0,blur:5,boat:3,book:3,bool:[0,3,4,5,6],border:5,both:[0,2,3,4,5],bottl:3,bottleneck:3,bottom:[4,5],bound:[0,4],boundari:0,bowl:3,box:[0,3,4,5],box_area:4,box_convert:4,box_iou:4,boxes1:4,boxes2:4,br_flip:5,branch:3,bright:[0,5],brightness_factor:5,broccoli:3,buffer:2,build:5,bus:3,byclass:0,bymerg:0,bz2:0,cach:3,cake:3,calcul:5,call:[3,6],callabl:0,can:[0,1,2,3,5],cannot:5,cap:0,car:3,carrot:3,cast:5,cat:[0,3],categori:[0,3,4],ceil:4,celeba:1,celebfac:0,cell:3,center:[4,5],center_crop:5,center_flip:5,centercrop:[3,5],centr:4,central:5,chain:5,chair:3,challendg:0,chang:[1,3,5],channel:[0,2,3,4,5],check:0,choic:5,choos:5,chosen:5,church_outdoor_train:0,chw:2,cifar100:0,cifar10:0,cifar:1,cityscap:1,class_i:0,class_index:0,class_x:0,classif:[0,1],classifi:1,clip:[0,3,4],clip_boxes_to_imag:4,clipped_box:4,clock:3,clockwis:5,cmyk:5,cnn:4,coars:0,coco:[1,3],coco_instance_category_nam:3,coco_person_keypoint_nam:3,coco_util:3,cococapt:0,cocodetect:0,code:3,col:0,collect:0,color:[0,5],colorjitt:5,column:[4,5],come:0,commit:1,common:[0,1,5],compar:2,compat:[0,1,5],complementari:5,complet:[1,5],complex:5,compon:[0,5],compos:[3,5],composit:1,compress:2,compression_level:2,comput:[1,3,4,5,6],concret:3,conda:1,configur:3,connect:3,consecut:5,consid:[0,5],consist:1,constant:5,construct:3,c
onstructor:[2,3],contain:[0,2,3,4,5],content:2,contrail:0,contrast:[3,5],contrast_factor:5,control:[2,5],convers:1,convert:[4,5],convert_image_dtyp:5,convertimagedtyp:5,convolut:4,coordin:[4,5],corner:[4,5],correct:5,correspond:[2,3,4,5],corrupt:0,couch:3,counter:5,covari:5,cover:0,coverag:1,cow:3,cpu:4,creat:[0,4,5],creation:0,criterion:4,crop:[3,4,5],cuda:3,cudnn:3,cup:3,current:[0,2,4],custom:[1,5],cxcywh:4,cyclic:5,dark:5,darker:5,dart:0,data1:0,data2:0,data:[0,4,5],data_load:0,databas:0,dataload:0,dataset:[1,3,5],datasetfold:1,datset:0,deactiv:5,deal:5,decid:0,decod:[1,2],decode_imag:2,decreas:4,deep:3,deeper:3,deeplabv3_resnet101:3,deeplabv3_resnet50:3,def:5,default_load:0,defin:[0,5],definit:3,deform:4,deform_conv2d:4,deformconv2d:4,degre:5,denot:5,dens:3,densenet121:3,densenet161:3,densenet169:3,densenet201:3,depend:3,deprec:5,depth:[3,4,5],deriv:5,describ:[1,3,4],descriptor:0,design:3,desir:5,detail:[3,5],detect:[1,2,4],detectron2:4,determin:[5,6],determinist:5,deviat:5,devic:1,devkit:0,dict:[2,3],dictionari:[0,2,3],differ:[0,3,4],digit:0,dilat:4,dimens:5,dimension:2,dine:3,diningt:3,dir:0,direct:5,directori:[0,3],discard:4,discuss:3,displai:[3,6],distanc:[0,4],distort:5,distortion_scal:5,distribut:1,document:[1,6],doe:[1,2,5],dog:[0,3],don:5,done:5,donut:3,dot:5,download:[0,3],dpython:5,drier:3,drop:0,dset:0,dtype:[0,5],dummi:4,durat:2,dure:3,each:[0,2,3,4,5,6],earli:1,edg:5,effect:5,effici:3,either:2,element:[0,4,5],eleph:3,els:[0,5],emit:0,emnist:1,empti:0,encode_jpeg:2,encode_png:2,end:2,end_pt:2,endpoint:5,enhanc:5,enough:5,ensur:3,entir:5,entiti:0,entri:0,environ:3,equat:5,equival:3,eras:5,error:[3,5],etc:5,eval:3,evalu:[0,3],everi:[0,2,3,4],exact:4,exactli:[0,4,5],exampl:[0,2,3,4,5,6],except:[1,3,5],exclud:0,exist:0,expand:5,expans:5,expect:[0,1,3,4,5],experi:3,explicitli:2,ext:0,extens:[0,6],extra:[0,4],extra_block:4,extract:0,extrafpnblock:4,factor:[2,4,5],fake:0,fakedata:1,fals:[0,3,4,5,6],fashion:[1,4],fashionmnist:0,fast:4,faster:1,faster_rcnn:3,fasterrcnn_resnet50_fpn:3,favor:5,fcn:[3,4],fcn_resnet101:3,fcn_resnet50:3,feat0:4,feat1:4,feat2:4,feat3:4,featmap_nam:4,featur:[1,4],feature_pyramid_network:4,featurepyramidnetwork:4,feedback:1,fewer:3,ffmpeg:1,field:[2,3],file:[0,2,6],filenam:[2,6],fill:5,fillcolor:5,filter:5,fine:[0,1,5],fire:3,first:[0,4,5],five_crop:5,fivecrop:5,fix:[0,3,5],flag:[1,5],flatten:5,flickr30k:0,flickr8k:0,flickr:1,flip:5,float32:5,float64:5,floattensor:[3,5],fly:0,fold:0,folder:0,follow:[0,2,3,5],fork:3,format:[0,2,3,4,5,6],forth:5,forward:5,found:3,four:5,fpn:[3,4],fps:2,fraction:[2,5],frame:[0,2,3],frame_r:0,frames_per_clip:0,framework:1,frisbe:3,from:[0,2,3,4,5,6],frozen:3,full:0,fulli:2,fuse:5,gain:5,gamma:5,gap:1,gaussian:5,gaussian_blur:5,gaussianblur:5,gener:[0,1,3,4],generalized_box_i:4,generalized_i:4,get:[0,1,4,5],get_image_backend:1,get_metadata:2,get_param:5,giraff:3,give:[0,5],given:[0,1,4,5,6],glass:3,global:3,glove:3,gpu:[3,4],grai:5,grain:[1,5],grayscal:[2,5],greater:4,grid:[4,6],ground:3,group:4,gtcoars:0,gtfine:0,guarante:4,guidelin:3,hair:3,handbag:3,handl:0,happen:1,has:[0,3,5],have:[0,3,4,5],head:4,height:[0,4,5],henc:0,here:[0,3,6],heurist:4,hflip:5,high:[0,2],higher:4,hmdb51:1,hold:5,horizont:5,hors:3,hot:3,how:[3,5],howev:[0,1],hsv:5,http:[3,5],hue:5,hue_factor:5,hydrant:3,ident:[0,5],idx:4,imag:[0,1,3,4,6],image_height:2,image_s:[0,4],image_set:0,image_width:2,imagefold:1,imagenet:[1,3],imagenet_data:0,imagenet_root:0,img:[0,3,5],img_height:5,img_siz:5,img_width:5,implement:[0,1,3,4,5],improv:[1,3],in_channel:4,in_channe
ls_list:4,in_fmt:4,in_height:4,in_width:4,incept:5,inception_v3:3,includ:[0,1,3],increas:[4,5],index:[0,4],indic:4,individu:2,infer:[3,4],info:2,inform:[0,2,3,5],initi:3,inplac:5,input:[0,2,3,4,5],insid:4,inspect:2,inst:0,instal:[0,3],instanc:[0,1,2],instead:[0,5,6],int32:5,int64:[4,5],int64tensor:3,int8:2,integ:5,intel:1,intens:5,interest:[0,4],intern:[0,3],internet:0,interpol:[4,5],interpret:[2,5],intersect:4,interv:5,invari:5,invert:3,iou:[3,4],iou_threshold:4,ipp:1,is_valid_fil:0,item:[0,4],iter:4,its:[0,2,3,4,5],jaccard:4,jit:5,jitter:5,jpeg:2,json:0,keep:[4,5],kept:4,kernel:[4,5],kernel_height:4,kernel_s:[4,5],kernel_width:4,keyboard:3,keypoint:1,keypoint_rcnn:3,keypointrcnn_resnet50_fpn:3,kinet:[1,3],kinetics400:0,kite:3,kmnist:1,knife:3,known:5,ksize:5,kuzushiji:0,kwarg:[0,3,5,6],lab:5,label:[0,3],lambd:5,lambda:5,landmark:0,laptop:3,larg:[0,3,5],larger:[3,4,5],last:[3,5],law:5,layer:3,layout:2,lead:5,learn:[0,1,3],least:[3,4],leav:0,left:[4,5],left_ankl:3,left_ear:3,left_elbow:3,left_ey:3,left_hip:3,left_kne:3,left_should:3,left_wrist:3,lefteye_i:0,lefteye_x:0,leftimg8bit:0,leftmouth_i:0,leftmouth_x:0,legaci:4,len:[0,3,5],length:[3,5],less:[1,3],letter:0,level:[2,3],librari:1,libx264:2,lie:[4,5],lies:0,light:[3,4],lighter:5,like:[1,2,5],likewis:4,limit:1,linear:3,lineartransform:5,list:[0,2,3,4,5,6],load:[0,1,3],load_url:3,loadann:0,loader:0,local:0,locat:[0,3,5],loop:5,loss:[0,3],lost:3,lower:[2,4],lsun:1,machin:1,made:5,mai:[1,5],maintain:[1,5],major:1,make:[0,5,6],make_grid:6,mani:1,manual_se:5,map:[4,5],mask:[4,5],mask_rcnn:3,maskrcnn_resnet50_fpn:3,mat:0,match:[0,5],matrix:[4,5],max:[5,6],maximum:[4,5],mc3:3,mc3_18:3,mean:[3,5],mean_vector:5,memori:[2,3],memory_effici:3,mention:4,meta:0,metadata:2,meter:3,meth:5,method:[0,2,3,5],microwav:3,might:[0,5],min:[5,6],min_siz:[3,4],mind:5,mini:[3,6],minimum:[3,4,5],mismatch:5,mnasnet0_5:3,mnasnet0_75:3,mnasnet1_0:3,mnasnet1_3:3,mnist:1,mobil:3,mobilenet_v2:3,mobilenetv2:3,mode:[0,3,5],model:[1,4,5],model_zoo:3,modul:[3,4,5],modulelist:5,momet:2,more:[0,2,3,4,5],motorbik:3,motorcycl:3,mountain:0,mous:3,much:5,multi:4,multipl:[0,4,5],multipli:[3,5],multiprocess:0,multiscaleroialign:4,must:[2,5],mutat:5,my_segmentation_transform:5,myrotationtransform:5,name:[0,1,4],nativ:[1,4],ncrop:5,ndarrai:5,nearest:5,need:[0,1],neg:5,neighbor:4,network:[4,5],neural:3,next:0,nist:0,nms:4,non:[4,5],none:[0,2,3,4,5,6],nonetyp:[0,2,4,5,6],normal:[3,5,6],nose:3,nose_i:0,nose_x:0,notabl:5,note:[0,2,3,5],notebook:6,notic:1,now:[3,5],nrow:6,nsdf3:0,nthread:0,num_class:[0,3],num_keypoint:3,num_lin:0,num_output_channel:5,num_work:0,number:[0,2,3,4,5,6],numpi:[0,5],nxm:4,object:[0,1,4,5,6],obtain:3,offici:3,offlin:5,offset:[0,4,5],offset_group:4,often:2,omit:[5,6],omniglot:1,one:[0,1,2,3,4,5],ones:[0,3],onli:[1,3],onnx:3,open:1,oper:[1,2,4,5],oppos:5,ops:1,opset_vers:3,option:[0,2,3,4,5,6],orang:3,order:[3,4,5],ordereddict:4,org:[3,5],origin:[4,5],other:[2,3,5,6],otherwis:[0,3],out:[2,4,5],out_channel:4,out_fmt:4,out_h:4,out_height:4,out_w:4,out_width:4,outer:3,output:[0,2,3,4,5],output_s:[4,5],outsid:5,oven:3,over:[0,4,5,6],overal:5,overflow:5,overheard:0,overlap:4,packag:[1,2],pad:[4,5,6],pad_if_need:5,pad_valu:6,padding_mod:5,pairwis:4,paper:[3,4],parallel:5,parallelli:0,param:[0,3,5],paramet:[0,1,2,3,4,5,6],park:3,part:[0,1],parti:1,particular:0,pascal:[0,3],pass:[0,3,4,5],past:3,path:[0,2,6],pathlib:6,per:[2,3],perfectli:4,perform:[1,2,4,5],perhap:1,person:[0,1],perspect:5,phone:3,photo:0,phototour:1,pic:5,pick:5,pil:[0,1],pil_to_tensor:5,pillow:5,piltote
nsor:5,pipelin:5,pixel:[0,4,5,6],pixelwis:3,pizza:3,place:[0,5],places365:1,plane:0,plant:3,platform:3,pleas:[0,5],plu:5,plume:0,png:[0,2],point:[0,2,4,5],poli:0,polygon:0,pool:4,pooled_w:4,popular:1,popularli:5,posit:[4,5],possibl:0,post:[3,5],postprocess:3,pot:3,pottedpl:3,power:5,practic:[2,3],pre:[0,3],precis:3,predict:3,preprocess:3,present:[0,2,3,4],preserv:5,pretrain:3,pretrained_backbon:3,previou:5,print:[0,2,4],probabl:5,procedur:0,process:[0,3],produc:5,product:5,progress:3,project:1,proport:5,prototyp:1,provid:[2,3,4,5],ps_roi_align:4,ps_roi_pool:4,psroialign:4,psroipool:4,pts:2,pts_unit:2,put:0,pyav:1,pypi:1,pyramid:4,python:[1,5],pytorch:[0,1,3,4],qmnist:1,qualiti:[0,2],r2plus1d_18:3,r3d:3,r3d_18:3,racket:3,rais:[0,5],rand:[3,4],randint:[3,5],random:[0,3,4,5],random_offset:0,randomaffin:5,randomappli:5,randomchoic:5,randomcrop:[0,5],randomeras:5,randomgrayscal:5,randomhorizontalflip:5,randomli:[0,5],randomord:5,randomperspect:5,randomresizedcrop:5,randomrot:5,randomsizedcrop:5,randomverticalflip:5,rang:[0,3,5,6],rate:[2,3],rather:6,ratio:[4,5],raw:2,read:2,read_imag:2,read_video:2,read_video_timestamp:2,reader:2,reader_md:2,real:5,recal:3,recognit:[0,3],rectangl:5,refer:[1,3,4,5],reflect:5,refriger:3,region:[4,5],regress:3,releas:1,relev:2,remain:0,remot:3,remov:4,remove_small_box:4,repeat:[4,5],repo:3,report:3,repres:[0,4],represent:4,requir:[0,3,5],resampl:5,rescal:5,reshap:5,residu:3,resiz:[0,3,5],resized_crop:5,resnet101:3,resnet152:3,resnet18:3,resnet34:3,resnet3d:3,resnet50:3,resnext101_32x8d:3,resnext50_32x4d:3,resolut:0,respect:[0,4,5],result:[2,3,4,5],result_avg:5,rethink:3,retriev:2,reus:5,revers:5,rgb:[2,3,5],rgb_to_grayscal:5,rgba:5,right:[4,5],right_ankl:3,right_ear:3,right_elbow:3,right_ey:3,right_hip:3,right_kne:3,right_should:3,right_wrist:3,righteye_i:0,righteye_x:0,rightmouth_i:0,rightmouth_x:0,robust:1,roi:4,roi_align:4,roi_pool:4,roi_width:4,roialign:4,roipool:4,root:0,rotat:5,rotation_transform:5,roughli:3,row:6,rpn:3,run:1,runtimeerror:[0,5],sacrif:3,sai:0,same:[0,3,4,5,6],sampl:[0,4,5],sampling_ratio:4,sandwich:3,satisfi:4,satur:5,saturation_factor:5,save:[0,2,6],save_imag:6,sbd:1,sbdataset:0,sbu:1,sbucaptionedphotodataset:0,scale:[0,3,4,5,6],scale_each:6,scale_rang:5,scipi:[0,3],scissor:3,score:[3,4],script:[3,5],scriptabl:1,scripted_transform:5,search:3,sec:2,second:[2,5],see:[1,3,4,5,6],seed:[0,5],seem:5,segment:[0,1,5],select:[0,2,3,4,5],self:5,semant:[0,1,5],sensit:4,separ:6,sequenc:5,sequenti:5,set:[0,2,3,4,5],set_current_stream:2,set_image_backend:1,set_video_backend:1,sever:5,shadow:5,shape:[0,3,4,5,6],shear:5,sheep:3,shift:[4,5,6],should:[0,1,3,4,5,6],shown:5,shuffl:0,shufflenet_v2_x0_5:3,shufflenet_v2_x1_0:3,shufflenet_v2_x1_5:3,shufflenet_v2_x2_0:3,shufflenetv2:3,side:[4,5],sigma:5,sigma_i:5,sigma_max:5,sigma_min:5,sigma_x:5,sign:3,similar:[0,4],sinc:5,singl:[4,5],sink:3,size:[0,3,4,5,6],skateboard:3,ski:3,sky:0,slightli:3,slower:3,small:0,smaller:[4,5],smnt:0,smoke:0,snow:0,snowboard:3,snowi:0,sofa:3,soft:3,solid:5,some:[0,3,4,5],sometim:1,sort:4,sourc:[0,1,2,3,4,5,6],space:[4,5],spatial:4,spatial_scal:4,specif:[2,4],specifi:[0,1,4,5,6],split:[0,3,4],spoon:3,sport:3,squar:5,squeezenet1_0:3,squeezenet1_1:3,stabl:1,stack:5,stage:1,standard:[0,3,5],start:[2,3],start_pt:2,startpoint:5,state:5,statu:1,std:[3,5],stderr:3,step:0,step_between_clip:0,stl10:1,stl10_binari:0,stop:3,store:[0,5],str:[0,2,4,5,6],stream:[0,2],stream_typ:2,stride:4,string:[0,1,6],structur:2,subclass:0,subdir:0,subpackag:3,subset:3,subtract:5,suitcas:3,summari:3,support:[0,1,2
,4,5],suppos:[4,5],suppress:4,sure:5,surfboard:3,svd:5,svhn:1,symmetr:5,tabl:3,tag:1,take:[0,2,4],taken:[0,4],tar:0,tarbal:0,target:[0,3,5],target_transform:0,target_typ:0,task:[3,5],teddi:3,ten:5,ten_crop:5,tencrop:5,tenni:3,tensor:[0,1,2,3,4,6],term:1,terminolog:0,test10k:0,test50k:0,test:[0,1,2,3,5],than:[0,1,3,4,5,6],thei:[0,2,3,4,5],them:0,therefor:5,thi:[0,1,2,3,4,5,6],thread:2,three:0,threshold:3,through:1,tie:3,time:[1,2,3],timestamp:2,tl_flip:5,to_grayscal:5,to_pil_imag:5,to_tensor:5,toaster:3,togeth:5,toilet:3,toothbrush:3,top:[0,1,3,4,5],topilimag:5,torch:[0,1,2,3,4,6],torch_model_zoo:3,torchaudio:1,torchelast:1,torchscript:[1,2,4,5],torchserv:1,torchtext:1,totensor:[0,3,5],tr_flip:5,traffic:3,trail:5,train2017:3,train:[0,3,5],train_extra:0,train_nov:0,trainabl:3,trainable_backbone_lay:3,trainval:0,transform:[0,1,3],transform_input:3,transformation_matrix:5,translat:5,tree:0,trick:3,truck:3,truth:3,tupl:[0,2,4,5,6],tvmonitor:3,twice:3,two:[0,2,3,4,5],txhxwxc:0,type:[0,2,3,4,5],typic:1,ucf101:1,uint8:[2,5],uint8tensor:3,umbrella:3,unchang:5,unfortun:3,uniformli:5,union:[0,2,4,5,6],unit:2,unlabel:0,upper:5,use:[0,3,4,5,6],used:[0,1,3,4,5,6],useful:[4,5],user:[1,5],uses:[0,1],using:[0,2,3,5],usp:1,util:[0,1,3],v100:3,val2017:3,val:0,valid:[0,3],valu:[0,2,3,4,5,6],variabl:3,vase:3,vector:[0,5],veri:3,version:[0,4,5],vertic:5,vertical_flip:5,vflip:5,vframe:2,vgg11:3,vgg11_bn:3,vgg13:3,vgg13_bn:3,vgg16:3,vgg16_bn:3,vgg19:3,vgg19_bn:3,via:4,video:[0,1],video_arrai:2,video_classif:3,video_codec:2,video_fp:2,video_path:2,video_read:1,videoclip:0,videoread:2,view:[0,5],visibl:3,vision:[1,3,4],voc2012:0,voc:[1,3],vocdetect:0,vocsegment:0,wai:[0,3],want:[2,3],weight:[3,4],weird:3,well:[2,3,5],what:0,when:[0,3,4,5],where:[0,2,3,4,5,6],whether:[0,2],which:[0,2,3,4,5],whichev:2,whilst:2,white:5,whiten:5,whole:[2,5],whose:0,wide_resnet101_2:3,wide_resnet50_2:3,width:[0,4,5],wine:3,without:[3,4,5],won:4,work:5,worker:0,would:2,write:2,write_jpeg:2,write_png:2,write_video:2,xla:1,xml:0,xxx:0,xxy:0,xxz:0,xywh:4,xyxi:4,ycbcr:5,year:0,yet:[1,5],you:[0,3,5],your:[3,5],zebra:3,zero:[4,5],zhong:5,zip:0},titles:["torchvision.datasets","torchvision","torchvision.io","torchvision.models","torchvision.ops","torchvision.transforms","torchvision.utils"],titleterms:{"function":5,alexnet:3,api:2,caption:0,celeba:0,characterist:3,cifar:0,cityscap:0,classif:3,cnn:3,coco:0,composit:5,convers:5,convolut:3,dataset:0,datasetfold:0,deeplabv3:3,densenet:3,detect:[0,3],emnist:0,fakedata:0,fashion:0,faster:3,fine:2,flickr:0,fulli:3,gener:5,googlenet:3,grain:2,hmdb51:0,imag:[2,5],imagefold:0,imagenet:0,incept:3,instanc:3,keypoint:3,kinet:0,kmnist:0,lsun:0,mask:3,mix:3,mnasnet:3,mnist:0,mobilenet:3,model:3,network:3,object:3,omniglot:0,onli:5,ops:4,person:3,phototour:0,pil:5,places365:0,qmnist:0,resnet:3,resnext:3,retinanet:3,runtim:3,sbd:0,sbu:0,scriptabl:5,segment:3,semant:3,shufflenet:3,squeezenet:3,stl10:0,svhn:0,tensor:5,torch:5,torchvis:[0,1,2,3,4,5,6],transform:5,ucf101:0,usp:0,util:6,vgg:3,video:[2,3],voc:0,wide:3}}) \ No newline at end of file diff --git a/docs/1.7.0/torchvision/transforms.html b/docs/1.7.0/torchvision/transforms.html new file mode 100644 index 000000000000..3f94662d717d --- /dev/null +++ b/docs/1.7.0/torchvision/transforms.html @@ -0,0 +1,2752 @@ + + + + + + + + + + + + torchvision.transforms — Torchvision 0.8.0.dev20201012 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

torchvision.transforms

+

Transforms are common image transformations. They can be chained together using Compose. +Additionally, there is the torchvision.transforms.functional module. +Functional transforms give fine-grained control over the transformations. +This is useful if you have to build a more complex transformation pipeline +(e.g. in the case of segmentation tasks).

+

All transformations accept a PIL Image, a Tensor Image, or a batch of Tensor Images as input. A Tensor Image is a tensor of shape (C, H, W), where C is the number of channels and H and W are the image height and width. A batch of Tensor Images is a tensor of shape (B, C, H, W), where B is the number of images in the batch. Deterministic or random transformations applied to a batch of Tensor Images transform all images of the batch identically.

+
+

Warning

+

Since v0.8.0, all random transformations use the torch default random generator to sample their random parameters. This is a backward-incompatible change, and users should set the random state as follows:

+
# Previous versions
+# import random
+# random.seed(12)
+
+# Now
+import torch
+torch.manual_seed(17)
+
+
+

Keep in mind that the same seed for the torch random generator and the Python random generator will not produce the same results.

+
+
+

Scriptable transforms

+

In order to script the transformations, please use torch.nn.Sequential instead of Compose.

+
transforms = torch.nn.Sequential(
+    transforms.CenterCrop(10),
+    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+)
+scripted_transforms = torch.jit.script(transforms)
+
+
+

Make sure to use only scriptable transformations, i.e. those that work with torch.Tensor and do not require lambda functions or PIL.Image.

+

For any custom transformations to be used with torch.jit.script, they should be derived from torch.nn.Module.
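A minimal sketch of such a custom module-based transform; ClampPixels is a hypothetical transform used purely for illustration, and the pipeline assumes tensor images:

import torch
import torch.nn as nn
import torchvision.transforms as T

class ClampPixels(nn.Module):
    """Hypothetical example transform: clamps tensor pixel values to [lo, hi]."""
    def __init__(self, lo: float = 0.0, hi: float = 1.0):
        super().__init__()
        self.lo = lo
        self.hi = hi

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        # Deriving from nn.Module (rather than defining __call__ on a plain class)
        # is what allows the transform to be placed in nn.Sequential and scripted.
        return img.clamp(self.lo, self.hi)

pipeline = nn.Sequential(T.CenterCrop(10), ClampPixels())
scripted = torch.jit.script(pipeline)
print(scripted(torch.rand(3, 32, 32)).shape)  # torch.Size([3, 10, 10])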

+
+
+

Compositions of transforms

+
+
+class torchvision.transforms.Compose(transforms)[source]
+

Composes several transforms together. This transform does not support torchscript. +Please, see the note below.

+ +++ + + + +
Parameters:transforms (list of Transform objects) – list of transforms to compose.
+

Example

+
>>> transforms.Compose([
+>>>     transforms.CenterCrop(10),
+>>>     transforms.ToTensor(),
+>>> ])
+
+
+
+

Note

+

In order to script the transformations, please use torch.nn.Sequential as below.

+
>>> transforms = torch.nn.Sequential(
+>>>     transforms.CenterCrop(10),
+>>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+>>> )
+>>> scripted_transforms = torch.jit.script(transforms)
+
+
+

Make sure to use only scriptable transformations, i.e. those that work with torch.Tensor and do not require lambda functions or PIL.Image.

+
+
+ +
+
+

Transforms on PIL Image and torch.*Tensor

+
+
+class torchvision.transforms.CenterCrop(size)[source]
+

Crops the given image at the center. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + +
Parameters:size (sequence or int) – Desired output size of the crop. If size is an +int instead of sequence like (h, w), a square crop (size, size) is +made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be cropped.
Returns:Cropped image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)[source]
+

Randomly change the brightness, contrast and saturation of an image.

+ +++ + + + +
Parameters:
    +
  • brightness (float or tuple of float (min, max)) – How much to jitter brightness. brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness] or the given [min, max]. Should be non-negative numbers.
  • +
  • contrast (float or tuple of float (min, max)) – How much to jitter contrast. contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast] or the given [min, max]. Should be non-negative numbers.
  • +
  • saturation (float or tuple of float (min, max)) – How much to jitter saturation. saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation] or the given [min, max]. Should be non-negative numbers.
  • +
  • hue (float or tuple of float (min, max)) – How much to jitter hue. hue_factor is chosen uniformly from [-hue, hue] or the given [min, max]. Should satisfy 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Input image.
Returns:Color jittered image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(brightness, contrast, saturation, hue)[source]
+

Get a randomized transform to be applied on image.

+

Arguments are the same as those of __init__.

+ +++ + + + +
Returns:Transform which randomly adjusts brightness, contrast and +saturation in a random order.
+
+ +
+ +
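A minimal usage sketch for ColorJitter, assuming a random tensor image in [0, 1] stands in for real data:

import torch
import torchvision.transforms as T

jitter = T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
img = torch.rand(3, 64, 64)   # stand-in RGB tensor image with values in [0, 1]
out = jitter(img)             # each call samples fresh jitter factors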
+
+class torchvision.transforms.FiveCrop(size)[source]
+

Crop the given image into four corners and the central crop. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+
+

Note

+

This transform returns a tuple of images and there may be a mismatch in the number of +inputs and targets your Dataset returns. See below for an example of how to deal with +this.

+
+ +++ + + + +
Parameters:size (sequence or int) – Desired output size of the crop. If size is an int +instead of sequence like (h, w), a square crop of size (size, size) is made. +If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
+

Example

+
>>> transform = Compose([
+>>>    FiveCrop(size), # this is a list of PIL Images
+>>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
+>>> ])
+>>> #In your test loop you can do the following:
+>>> input, target = batch # input is a 5d tensor, target is 2d
+>>> bs, ncrops, c, h, w = input.size()
+>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
+>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
+
+
+
+
+forward(img)[source]
+
+++ + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be cropped.
Returns:tuple of 5 images. Image can be PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.Grayscale(num_output_channels=1)[source]
+

Convert image to grayscale. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, 3, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + + + + + +
Parameters:num_output_channels (int) – (1 or 3) number of channels desired for output image
Returns:
+
Grayscale version of the input.
+
    +
  • If num_output_channels == 1 : returned image is single channel
  • +
  • If num_output_channels == 3 : returned image is 3 channel with r == g == b
  • +
+
+
+
Return type:PIL Image
+
+
+forward(img: torch.Tensor) → torch.Tensor[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be converted to grayscale.
Returns:Grayscaled image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.Pad(padding, fill=0, padding_mode='constant')[source]
+

Pad the given image on all sides with the given “pad” value. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + +
Parameters:
    +
  • padding (int or tuple or list) – Padding on each border. If a single int is provided this +is used to pad all borders. If tuple of length 2 is provided this is the padding +on left/right and top/bottom respectively. If a tuple of length 4 is provided +this is the padding for the left, top, right and bottom borders respectively. +In torchscript mode padding as single int is not supported, use a tuple or +list of length 1: [padding, ].
  • +
  • fill (int or tuple) – Pixel fill value for constant fill. Default is 0. If a tuple of +length 3, it is used to fill R, G, B channels respectively. +This value is only used when the padding_mode is constant
  • +
  • padding_mode (str) –

    Type of padding. Should be: constant, edge, reflect or symmetric. +Default is constant. Mode symmetric is not yet supported for Tensor inputs.

    +
      +
    • constant: pads with a constant value, this value is specified with fill
    • +
    • edge: pads with the last value at the edge of the image
    • +
    • reflect: pads with reflection of image without repeating the last value on the edge
      +
      For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode +will result in [3, 2, 1, 2, 3, 4, 3, 2]
      +
    • +
    • symmetric: pads with reflection of image repeating the last value on the edge
      +
      For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode +will result in [2, 1, 1, 2, 3, 4, 4, 3]
      +
    • +
    +
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be padded.
Returns:Padded image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.RandomAffine(degrees, translate=None, scale=None, shear=None, resample=0, fillcolor=0)[source]
+

Random affine transformation of the image keeping center invariant. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

+ +++ + + + +
Parameters:
    +
  • degrees (sequence or float or int) – Range of degrees to select from. +If degrees is a number instead of sequence like (min, max), the range of degrees +will be (-degrees, +degrees). Set to 0 to deactivate rotations.
  • +
  • translate (tuple, optional) – tuple of maximum absolute fraction for horizontal +and vertical translations. For example translate=(a, b), then horizontal shift +is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is +randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
  • +
  • scale (tuple, optional) – scaling factor interval, e.g (a, b), then scale is +randomly sampled from the range a <= scale <= b. Will keep original scale by default.
  • +
  • shear (sequence or float or int, optional) – Range of degrees to select from. +If shear is a number, a shear parallel to the x axis in the range (-shear, +shear) +will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the +range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values, +a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. +Will not apply shear by default.
  • +
  • resample (int, optional) – An optional resampling filter. See filters for more information. +If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST. +If input is Tensor, only PIL.Image.NEAREST and PIL.Image.BILINEAR are supported.
  • +
  • fillcolor (tuple or int) – Optional fill color (Tuple for RGB Image and int for grayscale) for the area +outside the transform in the output image (Pillow>=5.0.0). This option is not supported for Tensor +input. Fill value for the area outside the transform in the output image is always 0.
  • +
+
+
+
+forward(img)[source]
+
+
img (PIL Image or Tensor): Image to be transformed.
+ +++ + + + + + +
Returns:Affine transformed image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(degrees: List[float], translate: Union[List[float], NoneType], scale_ranges: Union[List[float], NoneType], shears: Union[List[float], NoneType], img_size: List[int]) → Tuple[float, Tuple[int, int], float, Tuple[float, float]][source]
+

Get parameters for affine transformation

+ +++ + + + +
Returns:params to be passed to the affine transformation
+
+ +
+ +
+
+class torchvision.transforms.RandomApply(transforms, p=0.5)[source]
+

Apply randomly a list of transformations with a given probability.

+
+

Note

+

In order to script the transformation, please use torch.nn.ModuleList as input instead of list/tuple of +transforms as shown below:

+
>>> transforms = transforms.RandomApply(torch.nn.ModuleList([
+>>>     transforms.ColorJitter(),
+>>> ]), p=0.3)
+>>> scripted_transforms = torch.jit.script(transforms)
+
+
+

Make sure to use only scriptable transformations, i.e. those that work with torch.Tensor and do not require lambda functions or PIL.Image.

+
+ +++ + + + +
Parameters:
    +
  • transforms (list or tuple or torch.nn.Module) – list of transformations
  • +
  • p (float) – probability
  • +
+
+
+ +
+
+class torchvision.transforms.RandomCrop(size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant')[source]
+

Crop the given image at a random location. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + +
Parameters:
    +
  • size (sequence or int) – Desired output size of the crop. If size is an +int instead of sequence like (h, w), a square crop (size, size) is +made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
  • +
  • padding (int or sequence, optional) – Optional padding on each border +of the image. Default is None. If a single int is provided this +is used to pad all borders. If tuple of length 2 is provided this is the padding +on left/right and top/bottom respectively. If a tuple of length 4 is provided +this is the padding for the left, top, right and bottom borders respectively. +In torchscript mode padding as single int is not supported, use a tuple or +list of length 1: [padding, ].
  • +
  • pad_if_needed (boolean) – It will pad the image if smaller than the +desired size to avoid raising an exception. Since cropping is done +after padding, the padding seems to be done at a random offset.
  • +
  • fill (int or tuple) – Pixel fill value for constant fill. Default is 0. If a tuple of +length 3, it is used to fill R, G, B channels respectively. +This value is only used when the padding_mode is constant
  • +
  • padding_mode (str) –

    Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. +Mode symmetric is not yet supported for Tensor inputs.

    +
    +
      +
    • constant: pads with a constant value, this value is specified with fill
    • +
    • edge: pads with the last value on the edge of the image
    • +
    • reflect: pads with reflection of image (without repeating the last value on the edge)
      +
      padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode +will result in [3, 2, 1, 2, 3, 4, 3, 2]
      +
    • +
    • symmetric: pads with reflection of image (repeating the last value on the edge)
      +
      padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode +will result in [2, 1, 1, 2, 3, 4, 4, 3]
      +
    • +
    +
    +
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be cropped.
Returns:Cropped image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(img: torch.Tensor, output_size: Tuple[int, int]) → Tuple[int, int, int, int][source]
+

Get parameters for crop for a random crop.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be cropped.
  • +
  • output_size (tuple) – Expected output size of the crop.
  • +
+
Returns:

params (i, j, h, w) to be passed to crop for random crop.

+
Return type:

tuple

+
+
+ +
+ +
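Because get_params is exposed, the same random crop can be applied to an image and to its target, e.g. a segmentation mask. A brief sketch, with random tensors standing in for a real image/mask pair of the same size:

import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF

img = torch.rand(3, 256, 256)                # stand-in image
mask = torch.randint(0, 21, (1, 256, 256))   # stand-in segmentation mask

# Draw the crop parameters once, then apply the identical crop to both.
i, j, h, w = T.RandomCrop.get_params(img, output_size=(128, 128))
img_crop = TF.crop(img, i, j, h, w)
mask_crop = TF.crop(mask, i, j, h, w)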
+
+class torchvision.transforms.RandomGrayscale(p=0.1)[source]
+

Randomly convert image to grayscale with a probability of p (default 0.1). +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, 3, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + + + + + +
Parameters:p (float) – probability that image should be converted to grayscale.
Returns:Grayscale version of the input image with probability p and unchanged with probability (1 - p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
Return type:PIL Image or Tensor
+
+
+forward(img: torch.Tensor) → torch.Tensor[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be converted to grayscale.
Returns:Randomly grayscaled image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.RandomHorizontalFlip(p=0.5)[source]
+

Horizontally flip the given image randomly with a given probability. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + +
Parameters:p (float) – probability of the image being flipped. Default value is 0.5
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be flipped.
Returns:Randomly flipped image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.RandomPerspective(distortion_scale=0.5, p=0.5, interpolation=2, fill=0)[source]
+

Performs a random perspective transformation of the given image with a given probability. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

+ +++ + + + +
Parameters:
    +
  • distortion_scale (float) – argument to control the degree of distortion and ranges from 0 to 1. +Default is 0.5.
  • +
  • p (float) – probability of the image being transformed. Default is 0.5.
  • +
  • interpolation (int) – Interpolation type. If input is Tensor, only PIL.Image.NEAREST and +PIL.Image.BILINEAR are supported. Default, PIL.Image.BILINEAR for PIL images and Tensors.
  • +
  • fill (n-tuple or int or float) – Pixel fill value for area outside the rotated +image. If int or float, the value is used for all bands respectively. Default is 0. +This option is only available for pillow>=5.0.0. This option is not supported for Tensor +input. Fill value for the area outside the transform in the output image is always 0.
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be Perspectively transformed.
Returns:Randomly transformed image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(width: int, height: int, distortion_scale: float) → Tuple[List[List[int]], List[List[int]]][source]
+

Get parameters for perspective for a random perspective transform.

+ +++ + + + + + +
Parameters:
    +
  • width (int) – width of the image.
  • +
  • height (int) – height of the image.
  • +
  • distortion_scale (float) – argument to control the degree of distortion and ranges from 0 to 1.
  • +
+
Returns:

List containing [top-left, top-right, bottom-right, bottom-left] of the original image, +List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.

+
+
+ +
+ +
+
+class torchvision.transforms.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=2)[source]
+

Crop the given image to random size and aspect ratio. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+

A crop of random size (default: 0.08 to 1.0 of the original size) and a random aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made. This crop is finally resized to the given size. This transform is popularly used to train the Inception networks.

+ +++ + + + +
Parameters:
    +
  • size (int or sequence) – expected output size of each edge. If size is an +int instead of sequence like (h, w), a square output size (size, size) is +made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
  • +
  • scale (tuple of float) – range of size of the crop, relative to the original size
  • +
  • ratio (tuple of float) – range of aspect ratio of the crop
  • +
  • interpolation (int) – Desired interpolation enum defined by filters. +Default is PIL.Image.BILINEAR. If input is Tensor, only PIL.Image.NEAREST, PIL.Image.BILINEAR +and PIL.Image.BICUBIC are supported.
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be cropped and resized.
Returns:Randomly cropped and resized image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(img: torch.Tensor, scale: List[float], ratio: List[float]) → Tuple[int, int, int, int][source]
+

Get parameters for crop for a random sized crop.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Input image.
  • +
  • scale (list) – range of scale of the origin size cropped
  • +
  • ratio (list) – range of aspect ratio of the origin aspect ratio cropped
  • +
+
Returns:

+
params (i, j, h, w) to be passed to crop for a random sized crop.

+
+
+

+
Return type:

tuple

+
+
+ +
+ +
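A short usage sketch (a random tensor stands in for a real image); the output spatial size is always the requested size regardless of the sampled crop:

import torch
import torchvision.transforms as T

rrc = T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3))
out = rrc(torch.rand(3, 500, 600))
print(out.shape)   # torch.Size([3, 224, 224])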
+
+class torchvision.transforms.RandomRotation(degrees, resample=False, expand=False, center=None, fill=None)[source]
+

Rotate the image by angle. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

+ +++ + + + +
Parameters:
    +
  • degrees (sequence or float or int) – Range of degrees to select from. +If degrees is a number instead of sequence like (min, max), the range of degrees +will be (-degrees, +degrees).
  • +
  • resample (int, optional) – An optional resampling filter. See filters for more information. +If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST. +If input is Tensor, only PIL.Image.NEAREST and PIL.Image.BILINEAR are supported.
  • +
  • expand (bool, optional) – Optional expansion flag. +If true, expands the output to make it large enough to hold the entire rotated image. +If false or omitted, make the output image the same size as the input image. +Note that the expand flag assumes rotation around the center and no translation.
  • +
  • center (list or tuple, optional) – Optional center of rotation, (x, y). Origin is the upper left corner. +Default is the center of the image.
  • +
  • fill (n-tuple or int or float) – Pixel fill value for area outside the rotated +image. If int or float, the value is used for all bands respectively. +Defaults to 0 for all bands. This option is only available for Pillow>=5.2.0. +This option is not supported for Tensor input. Fill value for the area outside the transform in the output +image is always 0.
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be rotated.
Returns:Rotated image.
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(degrees: List[float]) → float[source]
+

Get parameters for rotate for a random rotation.

+ +++ + + + + + +
Returns:angle parameter to be passed to rotate for random rotation.
Return type:float
+
+ +
+ +
+
+class torchvision.transforms.RandomSizedCrop(*args, **kwargs)[source]
+

Note: This transform is deprecated in favor of RandomResizedCrop.

+
+ +
+
+class torchvision.transforms.RandomVerticalFlip(p=0.5)[source]
+

Vertically flip the given image randomly with a given probability. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + +
Parameters:p (float) – probability of the image being flipped. Default value is 0.5
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be flipped.
Returns:Randomly flipped image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.Resize(size, interpolation=2)[source]
+

Resize the input image to the given size. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + +
Parameters:
    +
  • size (sequence or int) – Desired output size. If size is a sequence like (h, w), the output size will be matched to this. If size is an int, the smaller edge of the image will be matched to this number, i.e. if height > width, then the image will be rescaled to (size * height / width, size). In torchscript mode size as a single int is not supported, use a tuple or list of length 1: [size, ].
  • +
  • interpolation (int, optional) – Desired interpolation enum defined by filters. +Default is PIL.Image.BILINEAR. If input is Tensor, only PIL.Image.NEAREST, PIL.Image.BILINEAR +and PIL.Image.BICUBIC are supported.
  • +
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be scaled.
Returns:Rescaled image.
Return type:PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.Scale(*args, **kwargs)[source]
+

Note: This transform is deprecated in favor of Resize.

+
+ +
+
+class torchvision.transforms.TenCrop(size, vertical_flip=False)[source]
+

Crop the given image into four corners and the central crop plus the flipped version of +these (horizontal flipping is used by default). +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+
+

Note

+

This transform returns a tuple of images and there may be a mismatch in the number of +inputs and targets your Dataset returns. See below for an example of how to deal with +this.

+
+ +++ + + + +
Parameters:
    +
  • size (sequence or int) – Desired output size of the crop. If size is an +int instead of sequence like (h, w), a square crop (size, size) is +made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
  • +
  • vertical_flip (bool) – Use vertical flipping instead of horizontal
  • +
+
+

Example

+
>>> transform = Compose([
+>>>    TenCrop(size), # this is a list of PIL Images
+>>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
+>>> ])
+>>> #In your test loop you can do the following:
+>>> input, target = batch # input is a 5d tensor, target is 2d
+>>> bs, ncrops, c, h, w = input.size()
+>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
+>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
+
+
+
+
+forward(img)[source]
+
+++ + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be cropped.
Returns:tuple of 10 images. Image can be PIL Image or Tensor
+
+ +
+ +
+
+class torchvision.transforms.GaussianBlur(kernel_size, sigma=(0.1, 2.0))[source]
+

Blurs image with randomly chosen Gaussian blur. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, 3, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • kernel_size (int or sequence) – Size of the Gaussian kernel.
  • +
  • sigma (float or tuple of python:float (min, max)) – Standard deviation to be used for +creating kernel to perform blurring. If float, sigma is fixed. If it is tuple +of float (min, max), sigma is chosen uniformly at random to lie in the +given range.
  • +
+
Returns:

Gaussian blurred version of the input image.

+
Return type:

PIL Image or Tensor

+
+
+
+forward(img: torch.Tensor) → torch.Tensor[source]
+
+++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – image of size (C, H, W) to be blurred.
Returns:Gaussian blurred image
Return type:PIL Image or Tensor
+
+ +
+
+static get_params(sigma_min: float, sigma_max: float) → float[source]
+

Choose sigma for gaussian_blur for random gaussian blurring.

+ +++ + + + + + + + +
Parameters:
    +
  • sigma_min (float) – Minimum standard deviation that can be chosen for blurring kernel.
  • +
  • sigma_max (float) – Maximum standard deviation that can be chosen for blurring kernel.
  • +
+
Returns:

Standard deviation to be passed to calculate kernel for gaussian blurring.

+
Return type:

float

+
+
+ +
+ +
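A minimal sketch, assuming a tensor image; a new sigma is drawn from the given range on every call:

import torch
import torchvision.transforms as T

blur = T.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))
out = blur(torch.rand(3, 64, 64))   # sigma sampled uniformly from [0.1, 2.0]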
+
+

Transforms on PIL Image only

+
+
+class torchvision.transforms.RandomChoice(transforms)[source]
+

Apply single transformation randomly picked from a list. This transform does not support torchscript.

+
+ +
+
+class torchvision.transforms.RandomOrder(transforms)[source]
+

Apply a list of transformations in a random order. This transform does not support torchscript.

+
+ +
+
+

Transforms on torch.*Tensor only

+
+
+class torchvision.transforms.LinearTransformation(transformation_matrix, mean_vector)[source]
+

Transform a tensor image with a square transformation matrix and a mean_vector computed +offline. +Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and +subtract mean_vector from it which is then followed by computing the dot +product with the transformation matrix and then reshaping the tensor to its +original shape.

+
+
Applications:
+
whitening transformation: suppose X is zero-centered data arranged as column vectors. Compute the data covariance matrix [D x D] with torch.mm(X.t(), X), perform SVD on this matrix, and pass the resulting matrix as transformation_matrix (see the sketch after this entry).
+
+ +++ + + + +
Parameters:
    +
  • transformation_matrix (Tensor) – tensor [D x D], D = C x H x W
  • +
  • mean_vector (Tensor) – tensor [D], D = C x H x W
  • +
+
+
+
+forward(tensor: torch.Tensor) → torch.Tensor[source]
+
+++ + + + + + + + +
Parameters:tensor (Tensor) – Tensor image of size (C, H, W) to be whitened.
Returns:Transformed image.
Return type:Tensor
+
+ +
+ +
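A rough sketch of that whitening setup, under the assumption that the dataset is a stack of flattened, zero-centered images; the eps term is an ad-hoc numerical safeguard and not part of the transform itself:

import torch
import torchvision.transforms as T

# Hypothetical dataset: N flattened images, D = C * H * W
X = torch.rand(1000, 3 * 8 * 8)
mean_vector = X.mean(dim=0)
X0 = X - mean_vector                                    # zero-center the data
cov = torch.mm(X0.t(), X0) / (X0.size(0) - 1)           # [D x D] covariance
U, S, _ = torch.svd(cov)
eps = 1e-5                                              # assumed stabiliser
W = U @ torch.diag(1.0 / torch.sqrt(S + eps)) @ U.t()   # ZCA-style whitening matrix

whiten = T.LinearTransformation(W, mean_vector)
out = whiten(torch.rand(3, 8, 8))                       # whitened tensor image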
+
+class torchvision.transforms.Normalize(mean, std, inplace=False)[source]
+

Normalize a tensor image with mean and standard deviation. +Given mean: (mean[1],...,mean[n]) and std: (std[1],..,std[n]) for n +channels, this transform will normalize each channel of the input +torch.*Tensor i.e., +output[channel] = (input[channel] - mean[channel]) / std[channel]

+
+

Note

+

This transform acts out of place, i.e., it does not mutate the input tensor.

+
+ +++ + + + +
Parameters:
    +
  • mean (sequence) – Sequence of means for each channel.
  • +
  • std (sequence) – Sequence of standard deviations for each channel.
  • +
  • inplace (bool,optional) – Bool to make this operation in-place.
  • +
+
+
+
+forward(tensor: torch.Tensor) → torch.Tensor[source]
+
+++ + + + + + + + +
Parameters:tensor (Tensor) – Tensor image of size (C, H, W) to be normalized.
Returns:Normalized Tensor image.
Return type:Tensor
+
+ +
+ +
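A short sketch using the same mean/std values that appear in the other snippets on this page:

import torch
import torchvision.transforms as T

normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = torch.rand(3, 224, 224)
out = normalize(img)   # out[c] = (img[c] - mean[c]) / std[c], per channel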
+
+class torchvision.transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False)[source]
+

Randomly selects a rectangular region in an image and erases its pixels. See ‘Random Erasing Data Augmentation’ by Zhong et al., https://arxiv.org/abs/1708.04896

+ +++ + + + + + +
Parameters:
    +
  • p – probability that the random erasing operation will be performed.
  • +
  • scale – range of proportion of erased area against input image.
  • +
  • ratio – range of aspect ratio of erased area.
  • +
  • value – erasing value. Default is 0. If a single int, it is used to +erase all pixels. If a tuple of length 3, it is used to erase +R, G, B channels respectively. +If a str of ‘random’, erasing each pixel with random values.
  • +
  • inplace – boolean to make this transform inplace. Default set to False.
  • +
+
Returns:

Erased Image.

+
+
+
# Examples:
+
>>> transform = transforms.Compose([
+>>>   transforms.RandomHorizontalFlip(),
+>>>   transforms.ToTensor(),
+>>>   transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+>>>   transforms.RandomErasing(),
+>>> ])
+
+
+
+
+
+
+forward(img)[source]
+
+++ + + + + + + + +
Parameters:img (Tensor) – Tensor image of size (C, H, W) to be erased.
Returns:Erased Tensor image.
Return type:img (Tensor)
+
+ +
+
+static get_params(img: torch.Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Union[List[float], NoneType] = None) → Tuple[int, int, int, int, torch.Tensor][source]
+

Get parameters for erase for a random erasing.

+ +++ + + + + + + + +
Parameters:
    +
  • img (Tensor) – Tensor image of size (C, H, W) to be erased.
  • +
  • scale (tuple or list) – range of proportion of erased area against input image.
  • +
  • ratio (tuple or list) – range of aspect ratio of erased area.
  • +
  • value (list, optional) – erasing value. If None, it is interpreted as “random” +(erasing each pixel with random values). If len(value) is 1, it is interpreted as a number, +i.e. value[0].
  • +
+
Returns:

params (i, j, h, w, v) to be passed to erase for random erasing.

+
Return type:

tuple

+
+
+ +
+ +
+
+class torchvision.transforms.ConvertImageDtype(dtype: torch.dtype) → None[source]
+

Convert a tensor image to the given dtype and scale the values accordingly

+ +++ + + + +
Parameters:dtype (torch.dtype) – Desired data type of the output
+
+

Note

+

When converting from a smaller to a larger integer dtype the maximum values are not mapped exactly. +If converted back and forth, this mismatch has no effect.

+
+ +++ + + + +
Raises:RuntimeError – When trying to cast torch.float32 to torch.int32 or torch.int64 as +well as for trying to cast torch.float64 to torch.int64. These conversions might lead to +overflow errors since the floating point dtype cannot store consecutive integers over the whole range +of the integer dtype.
+
+ +
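A small sketch of the value rescaling, assuming a uint8 tensor image:

import torch
import torchvision.transforms as T

to_float = T.ConvertImageDtype(torch.float32)
img_u8 = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
img_f = to_float(img_u8)   # values rescaled from [0, 255] to [0.0, 1.0]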
+
+

Conversion Transforms

+
+
+class torchvision.transforms.ToPILImage(mode=None)[source]
+

Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.

+

Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape +H x W x C to a PIL Image while preserving the value range.

+ +++ + + + +
Parameters:mode (PIL.Image mode) – color space and pixel depth of input data (optional). If mode is None (default), some assumptions are made about the input data:
- If the input has 4 channels, the mode is assumed to be RGBA.
- If the input has 3 channels, the mode is assumed to be RGB.
- If the input has 2 channels, the mode is assumed to be LA.
- If the input has 1 channel, the mode is determined by the data type (i.e. int, float, short).
+
+ +
+
+class torchvision.transforms.ToTensor[source]
+

Convert a PIL Image or numpy.ndarray to tensor. This transform does not support torchscript.

+

Converts a PIL Image or numpy.ndarray (H x W x C) in the range +[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] +if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) +or if the numpy.ndarray has dtype = np.uint8

+

In the other cases, tensors are returned without scaling.

+
+

Note

+

Because the input image is scaled to [0.0, 1.0], this transformation should not be used when +transforming target image masks. See the references for implementing the transforms for image masks.

+
+
+ +
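A sketch of the distinction, with hypothetical stand-in images: ToTensor / to_tensor rescales the image to [0.0, 1.0], while pil_to_tensor (documented further below) leaves integer mask values untouched:

import numpy as np
from PIL import Image
import torchvision.transforms.functional as TF

# Hypothetical stand-ins: an RGB image and a single-channel label mask
img = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
mask = Image.fromarray(np.random.randint(0, 21, (64, 64), dtype=np.uint8))

img_t = TF.to_tensor(img)          # float tensor, values scaled to [0.0, 1.0]
mask_t = TF.pil_to_tensor(mask)    # uint8 tensor, class ids left as-is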
+
+

Generic Transforms

+
+
+class torchvision.transforms.Lambda(lambd)[source]
+

Apply a user-defined lambda as a transform. This transform does not support torchscript.

+ +++ + + + +
Parameters:lambd (function) – Lambda/function to be used for transform.
+
+ +
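A small sketch; note that, as stated above, a Lambda transform is not scriptable:

import torch
import torchvision.transforms as T

# e.g. a quick tensor-only transform that adds a little Gaussian noise
add_noise = T.Lambda(lambda x: x + 0.05 * torch.randn_like(x))
out = add_noise(torch.rand(3, 8, 8))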
+
+

Functional Transforms

+

Functional transforms give you fine-grained control of the transformation pipeline. +As opposed to the transformations above, functional transforms don’t contain a random number +generator for their parameters. +That means you have to specify/generate all parameters, but you can reuse the functional transform.

+

Example: +you can apply a functional transform with the same parameters to multiple images like this:

+
import torchvision.transforms.functional as TF
+import random
+
+def my_segmentation_transforms(image, segmentation):
+    if random.random() > 0.5:
+        angle = random.randint(-30, 30)
+        image = TF.rotate(image, angle)
+        segmentation = TF.rotate(segmentation, angle)
+    # more transforms ...
+    return image, segmentation
+
+
+

Example: +you can use a functional transform to build transform classes with custom behavior:

+
import torchvision.transforms.functional as TF
+import random
+
+class MyRotationTransform:
+    """Rotate by one of the given angles."""
+
+    def __init__(self, angles):
+        self.angles = angles
+
+    def __call__(self, x):
+        angle = random.choice(self.angles)
+        return TF.rotate(x, angle)
+
+rotation_transform = MyRotationTransform(angles=[-30, -15, 0, 15, 30])
+
+
+
+
+torchvision.transforms.functional.adjust_brightness(img: torch.Tensor, brightness_factor: float) → torch.Tensor[source]
+

Adjust brightness of an Image.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be adjusted.
  • +
  • brightness_factor (float) – How much to adjust the brightness. Can be any non-negative number. 0 gives a black image, 1 gives the original image, while 2 increases the brightness by a factor of 2.
  • +
+
Returns:

Brightness adjusted image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.adjust_contrast(img: torch.Tensor, contrast_factor: float) → torch.Tensor[source]
+

Adjust contrast of an Image.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be adjusted.
  • +
  • contrast_factor (float) – How much to adjust the contrast. Can be any non-negative number. 0 gives a solid gray image, 1 gives the original image, while 2 increases the contrast by a factor of 2.
  • +
+
Returns:

Contrast adjusted image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.adjust_gamma(img: torch.Tensor, gamma: float, gain: float = 1) → torch.Tensor[source]
+

Perform gamma correction on an image.

+

Also known as Power Law Transform. Intensities in RGB mode are adjusted +based on the following equation:

+
+\[I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}\]
+

See Gamma Correction for more details.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – PIL Image to be adjusted.
  • +
  • gamma (float) – Non-negative real number, the same as \(\gamma\) in the equation. gamma larger than 1 makes the shadows darker, while gamma smaller than 1 makes dark regions lighter.
  • +
  • gain (float) – The constant multiplier.
  • +
+
Returns:

Gamma correction adjusted image.

+
Return type:

PIL Image or Tensor

+
+
+ +
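A brief sketch of the effect described above, using a random tensor image as a stand-in:

import torch
import torchvision.transforms.functional as TF

img = torch.rand(3, 16, 16)
darker = TF.adjust_gamma(img, gamma=2.0)    # gamma > 1: shadows get darker
lighter = TF.adjust_gamma(img, gamma=0.5)   # gamma < 1: dark regions get lighter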
+
+torchvision.transforms.functional.adjust_hue(img: torch.Tensor, hue_factor: float) → torch.Tensor[source]
+

Adjust hue of an image.

+

The image hue is adjusted by converting the image to HSV and +cyclically shifting the intensities in the hue channel (H). +The image is then converted back to original image mode.

+

hue_factor is the amount of shift in H channel and must be in the +interval [-0.5, 0.5].

+

See Hue for more details.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be adjusted.
  • +
  • hue_factor (float) – How much to shift the hue channel. Should be in +[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in +HSV space in positive and negative direction respectively. +0 means no shift. Therefore, both -0.5 and 0.5 will give an image +with complementary colors while 0 gives the original image.
  • +
+
Returns:

Hue adjusted image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.adjust_saturation(img: torch.Tensor, saturation_factor: float) → torch.Tensor[source]
+

Adjust color saturation of an image.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be adjusted.
  • +
  • saturation_factor (float) – How much to adjust the saturation. 0 will +give a black and white image, 1 will give the original image while +2 will enhance the saturation by a factor of 2.
  • +
+
Returns:

Saturation adjusted image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.affine(img: torch.Tensor, angle: float, translate: List[int], scale: float, shear: List[float], resample: int = 0, fillcolor: Union[int, NoneType] = None) → torch.Tensor[source]
+

Apply affine transformation on the image keeping image center invariant. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – image to transform.
  • +
  • angle (float or int) – rotation angle in degrees between -180 and 180, clockwise direction.
  • +
  • translate (list or tuple of python:integers) – horizontal and vertical translations (post-rotation translation)
  • +
  • scale (float) – overall scale
  • +
  • shear (float or tuple or list) – shear angle value in degrees between -180 to 180, clockwise direction. +If a tuple of list is specified, the first value corresponds to a shear parallel to the x axis, while +the second value corresponds to a shear parallel to the y axis.
  • +
  • resample (PIL.Image.NEAREST or PIL.Image.BILINEAR or PIL.Image.BICUBIC, optional) – An optional resampling filter. See filters for more information. +If omitted, or if the image is PIL Image and has mode “1” or “P”, it is set to PIL.Image.NEAREST. +If input is Tensor, only PIL.Image.NEAREST and PIL.Image.BILINEAR are supported.
  • +
  • fillcolor (int) – Optional fill color for the area outside the transform in the output image (Pillow>=5.0.0). +This option is not supported for Tensor input. Fill value for the area outside the transform in the output +image is always 0.
  • +
+
Returns:

Transformed image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.center_crop(img: torch.Tensor, output_size: List[int]) → torch.Tensor[source]
+

Crops the given image at the center. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be cropped.
  • +
  • output_size (sequence or int) – (height, width) of the crop box. If int or sequence with single int +it is used for both directions.
  • +
+
Returns:

Cropped image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float32) → torch.Tensor[source]
+

Convert a tensor image to the given dtype and scale the values accordingly

+ +++ + + + + + + + +
Parameters:
    +
  • image (torch.Tensor) – Image to be converted
  • +
  • dtype (torch.dtype) – Desired data type of the output
  • +
+
Returns:

Converted image

+
Return type:

(torch.Tensor)

+
+
+

Note

+

When converting from a smaller to a larger integer dtype the maximum values are not mapped exactly. +If converted back and forth, this mismatch has no effect.

+
+ +++ + + + +
Raises:RuntimeError – When trying to cast torch.float32 to torch.int32 or torch.int64 as +well as for trying to cast torch.float64 to torch.int64. These conversions might lead to +overflow errors since the floating point dtype cannot store consecutive integers over the whole range +of the integer dtype.
+
+ +
+
+torchvision.transforms.functional.crop(img: torch.Tensor, top: int, left: int, height: int, width: int) → torch.Tensor[source]
+

Crop the given image at specified location and output size. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading +dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be cropped. (0,0) denotes the top left corner of the image.
  • +
  • top (int) – Vertical component of the top left corner of the crop box.
  • +
  • left (int) – Horizontal component of the top left corner of the crop box.
  • +
  • height (int) – Height of the crop box.
  • +
  • width (int) – Width of the crop box.
  • +
+
Returns:

Cropped image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.erase(img: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False) → torch.Tensor[source]
+

Erase the input Tensor Image with given value.

+ +++ + + + + + + + +
Parameters:
    +
  • img (Tensor Image) – Tensor image of size (C, H, W) to be erased
  • +
  • i (int) – i in (i,j) i.e coordinates of the upper left corner.
  • +
  • j (int) – j in (i,j) i.e coordinates of the upper left corner.
  • +
  • h (int) – Height of the erased region.
  • +
  • w (int) – Width of the erased region.
  • +
  • v – Erasing value.
  • +
  • inplace (bool, optional) – For in-place operations. By default is set False.
  • +
+
Returns:

Erased image.

+
Return type:

Tensor Image

+
+
+ +
+
+torchvision.transforms.functional.five_crop(img: torch.Tensor, size: List[int]) → Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor][source]
+

Crop the given image into four corners and the central crop. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+
+

Note

+

This transform returns a tuple of images and there may be a +mismatch in the number of inputs and targets your Dataset returns.

+
+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be cropped.
  • +
  • size (sequence or int) – Desired output size of the crop. If size is an +int instead of sequence like (h, w), a square crop (size, size) is +made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
  • +
+
Returns:

+
tuple (tl, tr, bl, br, center)
+

Corresponding top left, top right, bottom left, bottom right and center crop.

+
+
+

+
Return type:

tuple

+
+
+ +
+
+torchvision.transforms.functional.gaussian_blur(img: torch.Tensor, kernel_size: List[int], sigma: Union[List[float], NoneType] = None) → torch.Tensor[source]
+

Performs Gaussian blurring on the img by given kernel. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be blurred
  • +
  • kernel_size (sequence of python:ints or int) – Gaussian kernel size. Can be a sequence of integers +like (kx, ky) or a single integer for square kernels. +In torchscript mode kernel_size as single int is not supported, use a tuple or +list of length 1: [ksize, ].
  • +
  • sigma (sequence of python:floats or float, optional) – Gaussian kernel standard deviation. Can be a +sequence of floats like (sigma_x, sigma_y) or a single float to define the +same sigma in both X/Y directions. If None, then it is computed using +kernel_size as sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8. +Default, None. In torchscript mode sigma as single float is +not supported, use a tuple or list of length 1: [sigma, ].
  • +
+
Returns:

Gaussian Blurred version of the image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.hflip(img: torch.Tensor) → torch.Tensor[source]
+

Horizontally flip the given PIL Image or Tensor.

+ +++ + + + + + + + +
Parameters:img (PIL Image or Tensor) – Image to be flipped. If img +is a Tensor, it is expected to be in […, H, W] format, +where … means it can have an arbitrary number of trailing +dimensions.
Returns:Horizontally flipped image.
Return type:PIL Image or Tensor
+
+ +
+
+torchvision.transforms.functional.normalize(tensor: torch.Tensor, mean: List[float], std: List[float], inplace: bool = False) → torch.Tensor[source]
+

Normalize a tensor image with mean and standard deviation.

+
+

Note

+

This transform acts out of place by default, i.e., it does not mutate the input tensor.

+
+

See Normalize for more details.

+ +++ + + + + + + + +
Parameters:
    +
  • tensor (Tensor) – Tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
  • +
  • mean (sequence) – Sequence of means for each channel.
  • +
  • std (sequence) – Sequence of standard deviations for each channel.
  • +
  • inplace (bool,optional) – Bool to make this operation inplace.
  • +
+
Returns:

Normalized Tensor image.

+
Return type:

Tensor

+
+
+ +
+
+torchvision.transforms.functional.pad(img: torch.Tensor, padding: List[int], fill: int = 0, padding_mode: str = 'constant') → torch.Tensor[source]
+

Pad the given image on all sides with the given “pad” value. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be padded.
  • +
  • padding (int or tuple or list) – Padding on each border. If a single int is provided this +is used to pad all borders. If tuple of length 2 is provided this is the padding +on left/right and top/bottom respectively. If a tuple of length 4 is provided +this is the padding for the left, top, right and bottom borders respectively. +In torchscript mode padding as single int is not supported, use a tuple or +list of length 1: [padding, ].
  • +
  • fill (int or str or tuple) – Pixel fill value for constant fill. Default is 0. If a tuple of +length 3, it is used to fill R, G, B channels respectively. +This value is only used when the padding_mode is constant. Only int value is supported for Tensors.
  • +
  • padding_mode

    Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. +Mode symmetric is not yet supported for Tensor inputs.

    +
      +
    • constant: pads with a constant value, this value is specified with fill
    • +
    • edge: pads with the last value on the edge of the image
    • +
    • reflect: pads with reflection of image (without repeating the last value on the edge)
      +
      padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode +will result in [3, 2, 1, 2, 3, 4, 3, 2]
      +
    • +
    • symmetric: pads with reflection of image (repeating the last value on the edge)
      +
      padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode +will result in [2, 1, 1, 2, 3, 4, 4, 3]
      +
    • +
    +
  • +
+
Returns:

Padded image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.perspective(img: torch.Tensor, startpoints: List[List[int]], endpoints: List[List[int]], interpolation: int = 2, fill: Union[int, NoneType] = None) → torch.Tensor[source]
+

Perform perspective transform of the given image. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be transformed.
  • +
  • startpoints (list of list of python:ints) – List containing four lists of two integers corresponding to four corners +[top-left, top-right, bottom-right, bottom-left] of the original image.
  • +
  • endpoints (list of list of python:ints) – List containing four lists of two integers corresponding to four corners +[top-left, top-right, bottom-right, bottom-left] of the transformed image.
  • +
  • interpolation (int) – Interpolation type. If input is Tensor, only PIL.Image.NEAREST and +PIL.Image.BILINEAR are supported. Default, PIL.Image.BILINEAR for PIL images and Tensors.
  • +
  • fill (n-tuple or int or float) – Pixel fill value for area outside the rotated +image. If int or float, the value is used for all bands respectively. +This option is only available for pillow>=5.0.0. This option is not supported for Tensor +input. Fill value for the area outside the transform in the output image is always 0.
  • +
+
Returns:

transformed Image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.pil_to_tensor(pic)[source]
+

Convert a PIL Image to a tensor of the same type.

+

See PILToTensor for more details.

+ +++ + + + + + + + +
Parameters:pic (PIL Image) – Image to be converted to tensor.
Returns:Converted image.
Return type:Tensor
+
+ +
+
+torchvision.transforms.functional.resize(img: torch.Tensor, size: List[int], interpolation: int = 2) → torch.Tensor[source]
+

Resize the input image to the given size. +The image can be a PIL Image or a torch Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be resized.
  • +
  • size (sequence or int) – Desired output size. If size is a sequence like +(h, w), the output size will be matched to this. If size is an int, +the smaller edge of the image will be matched to this number maintaining +the aspect ratio. i.e, if height > width, then image will be rescaled to +\(\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)\). +In torchscript mode size as single int is not supported, use a tuple or +list of length 1: [size, ].
  • +
  • interpolation (int, optional) – Desired interpolation enum defined by filters. +Default is PIL.Image.BILINEAR. If input is Tensor, only PIL.Image.NEAREST, PIL.Image.BILINEAR +and PIL.Image.BICUBIC are supported.
  • +
+
Returns:

Resized image.

+
Return type:

PIL Image or Tensor

+
+
+ +
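A short sketch of the smaller-edge behaviour with an integer size, using a random tensor as a stand-in:

import torch
import torchvision.transforms.functional as TF

img = torch.rand(3, 300, 400)   # height=300, width=400
out = TF.resize(img, 150)       # smaller edge (height) matched to 150
print(out.shape)                # torch.Size([3, 150, 200])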
+
+torchvision.transforms.functional.resized_crop(img: torch.Tensor, top: int, left: int, height: int, width: int, size: List[int], interpolation: int = 2) → torch.Tensor[source]
+

Crop the given image and resize it to desired size. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+

Notably used in RandomResizedCrop.

+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – Image to be cropped. (0,0) denotes the top left corner of the image.
  • +
  • top (int) – Vertical component of the top left corner of the crop box.
  • +
  • left (int) – Horizontal component of the top left corner of the crop box.
  • +
  • height (int) – Height of the crop box.
  • +
  • width (int) – Width of the crop box.
  • +
  • size (sequence or int) – Desired output size. Same semantics as resize.
  • +
  • interpolation (int, optional) – Desired interpolation enum defined by filters. +Default is PIL.Image.BILINEAR. If input is Tensor, only PIL.Image.NEAREST, PIL.Image.BILINEAR +and PIL.Image.BICUBIC are supported.
  • +
+
Returns:

Cropped image.

+
Return type:

PIL Image or Tensor

+
+
+ +
+
+torchvision.transforms.functional.rgb_to_grayscale(img: torch.Tensor, num_output_channels: int = 1) → torch.Tensor[source]
+

Convert RGB image to grayscale version of image. +The image can be a PIL Image or a Tensor, in which case it is expected +to have […, H, W] shape, where … means an arbitrary number of leading dimensions

+
+

Note

+

Note that this method supports only RGB images as input. For inputs in other color spaces, consider using torchvision.transforms.functional.to_grayscale with a PIL Image.

+
+ +++ + + + + + + + +
Parameters:
    +
  • img (PIL Image or Tensor) – RGB Image to be converted to grayscale.
  • +
  • num_output_channels (int) – number of channels of the output image. Value can be 1 or 3. Default, 1.
  • +
+
Returns:

+
Grayscale version of the image.
+

if num_output_channels = 1 : returned image is single channel

+

if num_output_channels = 3 : returned image is 3 channel with r = g = b

+
+
+

+
Return type:

PIL Image or Tensor

+
+
+ +
+
torchvision.transforms.functional.rotate(img: torch.Tensor, angle: float, resample: int = 0, expand: bool = False, center: Union[List[int], NoneType] = None, fill: Union[int, NoneType] = None) → torch.Tensor

Rotate the image by angle. The image can be a PIL Image or a Tensor, in which case it is expected to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

Parameters:
  • img (PIL Image or Tensor) – Image to be rotated.
  • angle (float or int) – Rotation angle value in degrees, counter-clockwise.
  • resample (PIL.Image.NEAREST or PIL.Image.BILINEAR or PIL.Image.BICUBIC, optional) – An optional resampling filter. See filters for more information. If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
  • expand (bool, optional) – Optional expansion flag. If true, expands the output image to make it large enough to hold the entire rotated image. If false or omitted, make the output image the same size as the input image. Note that the expand flag assumes rotation around the center and no translation.
  • center (list or tuple, optional) – Optional center of rotation. Origin is the upper left corner. Default is the center of the image.
  • fill (n-tuple or int or float) – Pixel fill value for the area outside the rotated image. If int or float, the value is used for all bands. Defaults to 0 for all bands. This option is only available for pillow>=5.2.0 and is not supported for Tensor input; for Tensors, the fill value outside the transform in the output image is always 0.

Returns:
    Rotated image.

Return type:
    PIL Image or Tensor
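A minimal rotate sketch on a synthetic PIL image; the angle is arbitrary:

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new("RGB", (320, 240))
rotated = F.rotate(img, angle=30)                # counter-clockwise; output keeps the input size
expanded = F.rotate(img, angle=30, expand=True)  # enlarge the canvas to hold the full rotation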
torchvision.transforms.functional.ten_crop(img: torch.Tensor, size: List[int], vertical_flip: bool = False) → List[torch.Tensor]

Generate ten cropped images from the given image. Crop the given image into the four corners and the central crop, plus the flipped version of these (horizontal flipping is used by default). The image can be a PIL Image or a Tensor, in which case it is expected to have […, H, W] shape, where … means an arbitrary number of leading dimensions.

Note

This transform returns a tuple of images, and there may be a mismatch in the number of inputs and targets your Dataset returns.

Parameters:
  • img (PIL Image or Tensor) – Image to be cropped.
  • size (sequence or int) – Desired output size of the crop. If size is an int instead of a sequence like (h, w), a square crop (size, size) is made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
  • vertical_flip (bool) – Use vertical flipping instead of horizontal.

Returns:
    Tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip): corresponding top left, top right, bottom left, bottom right and center crops, and the same for the flipped image.

Return type:
    tuple
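A minimal ten_crop sketch on a synthetic tensor image; stacking the returned tuple into a batch is a common follow-up:

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 256, 256)
crops = F.ten_crop(img, size=[224, 224])   # tuple of 10 cropped images
batch = torch.stack(crops)                 # shape (10, 3, 224, 224)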
torchvision.transforms.functional.to_grayscale(img, num_output_channels=1)

Convert a PIL Image of any mode (RGB, HSV, LAB, etc.) to a grayscale version of the image.

Parameters:
  • img (PIL Image) – PIL Image to be converted to grayscale.
  • num_output_channels (int) – Number of channels of the output image. Value can be 1 or 3. Default: 1.

Returns:
    Grayscale version of the image.
    If num_output_channels = 1: the returned image is single channel.
    If num_output_channels = 3: the returned image is 3 channel with r = g = b.

Return type:
    PIL Image
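A minimal to_grayscale sketch (PIL input only; the image mode and size are arbitrary):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new("RGB", (64, 64))
gray = F.to_grayscale(img)                          # single-channel grayscale image
gray3 = F.to_grayscale(img, num_output_channels=3)  # 3 channels with r = g = b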
torchvision.transforms.functional.to_pil_image(pic, mode=None)

Convert a tensor or an ndarray to a PIL Image.

See ToPILImage for more details.

Parameters:
  • pic (Tensor or numpy.ndarray) – Image to be converted to a PIL Image.
  • mode (PIL.Image mode) – Color space and pixel depth of input data (optional).

Returns:
    Image converted to PIL Image.

Return type:
    PIL Image
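A minimal to_pil_image sketch, converting both a float tensor and a uint8 ndarray (shapes are arbitrary):

import numpy as np
import torch
import torchvision.transforms.functional as F

chw = torch.rand(3, 64, 64)                  # float tensor in [0, 1], C x H x W
pil_a = F.to_pil_image(chw)

hwc = np.zeros((64, 64, 3), dtype=np.uint8)  # uint8 ndarray, H x W x C
pil_b = F.to_pil_image(hwc)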
torchvision.transforms.functional.to_tensor(pic)

Convert a PIL Image or numpy.ndarray to a tensor.

See ToTensor for more details.

Parameters:
  • pic (PIL Image or numpy.ndarray) – Image to be converted to a tensor.

Returns:
    Converted image.

Return type:
    Tensor
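A minimal to_tensor sketch on a synthetic PIL image:

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new("RGB", (64, 48))   # PIL size is (width, height)
t = F.to_tensor(img)               # float tensor of shape (3, 48, 64), values in [0, 1]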
torchvision.transforms.functional.vflip(img: torch.Tensor) → torch.Tensor

Vertically flip the given PIL Image or torch Tensor.

Parameters:
  • img (PIL Image or Tensor) – Image to be flipped. If img is a Tensor, it is expected to be in […, H, W] format, where … means it can have an arbitrary number of leading dimensions.

Returns:
    Vertically flipped image.

Return type:
    PIL Image or Tensor
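A minimal vflip sketch on a synthetic tensor image:

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 32, 32)
flipped = F.vflip(img)   # flipped along the height (H) dimension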
+
\ No newline at end of file
diff --git a/docs/1.7.0/torchvision/utils.html b/docs/1.7.0/torchvision/utils.html
new file mode 100644
index 000000000000..97deca0c454c
--- /dev/null
+++ b/docs/1.7.0/torchvision/utils.html
@@ -0,0 +1,618 @@
torchvision.utils — Torchvision 0.8.0.dev20201012 documentation

torchvision.utils

torchvision.utils.make_grid(tensor: Union[torch.Tensor, List[torch.Tensor]], nrow: int = 8, padding: int = 2, normalize: bool = False, range: Union[Tuple[int, int], NoneType] = None, scale_each: bool = False, pad_value: int = 0) → torch.Tensor

Make a grid of images.

Parameters:
  • tensor (Tensor or list) – 4D mini-batch Tensor of shape (B x C x H x W) or a list of images all of the same size.
  • nrow (int, optional) – Number of images displayed in each row of the grid. The final grid size is (B / nrow, nrow). Default: 8.
  • padding (int, optional) – Amount of padding. Default: 2.
  • normalize (bool, optional) – If True, shift the image to the range (0, 1) by the min and max values specified by range. Default: False.
  • range (tuple, optional) – Tuple (min, max), where min and max are numbers; these numbers are used to normalize the image. By default, min and max are computed from the tensor.
  • scale_each (bool, optional) – If True, scale each image in the batch of images separately rather than over the (min, max) of all images. Default: False.
  • pad_value (float, optional) – Value for the padded pixels. Default: 0.

Example

See this notebook here.
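A minimal make_grid sketch on a random mini-batch (batch size and image size are arbitrary):

import torch
from torchvision.utils import make_grid

batch = torch.rand(16, 3, 32, 32)           # B x C x H x W
grid = make_grid(batch, nrow=4, padding=2)  # single image laying the batch out in 4 columns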
torchvision.utils.save_image(tensor: Union[torch.Tensor, List[torch.Tensor]], fp: Union[str, pathlib.Path, BinaryIO], nrow: int = 8, padding: int = 2, normalize: bool = False, range: Union[Tuple[int, int], NoneType] = None, scale_each: bool = False, pad_value: int = 0, format: Union[str, NoneType] = None) → None

Save a given Tensor into an image file.

Parameters:
  • tensor (Tensor or list) – Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling make_grid.
  • fp (string or file object) – A filename or a file object.
  • format (Optional) – If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used.
  • **kwargs – Other arguments are documented in make_grid.
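A minimal save_image sketch; the output filename is an arbitrary example:

import torch
from torchvision.utils import save_image

batch = torch.rand(8, 3, 32, 32)
save_image(batch, "grid.png", nrow=4)  # writes a 4-column grid; format inferred from the ".png" extension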
\ No newline at end of file