@@ -105,9 +105,9 @@
     "    Orientationd,\n",
     "    Spacingd,\n",
     "    ScaleIntensityRanged,\n",
-    "    Compose\n",
+    "    Compose,\n",
     ")\n",
-    "from monai.data import Dataset,ThreadDataLoader\n",
+    "from monai.data import Dataset, ThreadDataLoader\n",
     "import torch\n",
     "import numpy as np\n",
     "import copy\n",
@@ -273,14 +273,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "transforms = Compose([\n",
-    "    LoadImaged(keys=\"image\", reader=\"NibabelReader\", to_gpu=False),\n",
-    "    EnsureTyped(keys=\"image\", device=torch.device(\"cuda:0\")),\n",
-    "    EnsureChannelFirstd(keys=\"image\"),\n",
-    "    Orientationd(keys=\"image\", axcodes=\"RAS\"),\n",
-    "    Spacingd(keys=\"image\", pixdim=[1.5, 1.5, 2.0], mode=\"bilinear\"),\n",
-    "    ScaleIntensityRanged(keys=\"image\", a_min=-57, a_max=164, b_min=0, b_max=1, clip=True),\n",
-    "])\n",
+    "transforms = Compose(\n",
+    "    [\n",
+    "        LoadImaged(keys=\"image\", reader=\"NibabelReader\", to_gpu=False),\n",
+    "        EnsureTyped(keys=\"image\", device=torch.device(\"cuda:0\")),\n",
+    "        EnsureChannelFirstd(keys=\"image\"),\n",
+    "        Orientationd(keys=\"image\", axcodes=\"RAS\"),\n",
+    "        Spacingd(keys=\"image\", pixdim=[1.5, 1.5, 2.0], mode=\"bilinear\"),\n",
+    "        ScaleIntensityRanged(keys=\"image\", a_min=-57, a_max=164, b_min=0, b_max=1, clip=True),\n",
+    "    ]\n",
+    ")\n",
     "\n",
     "dataset = Dataset(data=[{\"image\": i} for i in train_files], transform=transforms)\n",
     "dataloader = ThreadDataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)"