
Commit 376c9ca

Update pathology tumor detection with new per-image cuCIM transforms (#619)
1 parent 2b56524 commit 376c9ca

File tree

1 file changed: +13 -12 lines changed


pathology/tumor_detection/torch/camelyon_train_evaluate_pytorch_gpu.py

Lines changed: 13 additions & 12 deletions
@@ -78,12 +78,16 @@ def training(
     writer: SummaryWriter,
     print_step,
 ):
+    summary["epoch"] += 1
+
     model.train()

     n_steps = len(dataloader)
     iter_data = iter(dataloader)

     for step in range(n_steps):
+        summary["step"] += 1
+
         batch = next(iter_data)
         x = batch["image"].to(device)
         y = batch["label"].to(device)

@@ -120,9 +124,6 @@ def training(
                 f"train_loss: {loss_data:.5f}, train_acc: {acc_data:.3f}"
             )

-        summary["step"] += 1
-
-    summary["epoch"] += 1
     return summary
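In the two hunks above, the epoch and step counter updates move from the end of the loop bodies to the top, and (further down in this diff) train_counter now starts at zero instead of one. A minimal sketch of the resulting pattern, with the rest of the training body elided:

    # Sketch only (not the tutorial's full training() function): counters start
    # at 0 and are incremented at the top of each epoch and step, so any logging
    # inside the loop already sees the 1-based values.
    summary = {"n_epochs": 4, "epoch": 0, "step": 0}

    def training(summary, dataloader):
        summary["epoch"] += 1          # first epoch is logged as 1
        for _ in range(len(dataloader)):
            summary["step"] += 1       # global step advances before any logging
            ...                        # forward/backward pass, metrics, prints
        return summary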

@@ -213,8 +214,8 @@ def main(cfg):
     preprocess_gpu_train = Compose(
         [
             ToCupy(),
-            RandCuCIM(name="color_jitter", brightness=64.0 / 255.0, contrast=0.75, saturation=0.25, hue=0.04),
-            RandCuCIM(name="image_flip", apply_prob=cfg["prob"], spatial_axis=-1),
+            RandCuCIM(name="rand_color_jitter", prob=cfg["prob"], brightness=64.0 / 255.0, contrast=0.75, saturation=0.25, hue=0.04),
+            RandCuCIM(name="rand_image_flip", prob=cfg["prob"], spatial_axis=-1),
             RandCuCIM(name="rand_image_rotate_90", prob=cfg["prob"], max_k=3, spatial_axis=(-2, -1)),
             CastToType(dtype=np.float32),
             RandCuCIM(name="rand_zoom", prob=cfg["prob"], min_zoom=0.9, max_zoom=1.1),
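The hunk above switches to cuCIM's newer per-image random transforms: rand_color_jitter and rand_image_flip replace color_jitter/image_flip, and both now take a prob argument (matching the existing rand_image_rotate_90 and rand_zoom entries) instead of apply_prob. A self-contained sketch of the updated chain, assuming a MONAI build with cuCIM support and a CUDA GPU; the prob value and the random CHW patch below are stand-ins, not part of the tutorial:

    import numpy as np
    from monai.transforms import CastToType, Compose, RandCuCIM, ToCupy

    prob = 0.5  # stand-in for cfg["prob"]

    preprocess_gpu_train = Compose(
        [
            ToCupy(),  # hand the NumPy patch to cuCIM as a CuPy array on the GPU
            RandCuCIM(name="rand_color_jitter", prob=prob,
                      brightness=64.0 / 255.0, contrast=0.75, saturation=0.25, hue=0.04),
            RandCuCIM(name="rand_image_flip", prob=prob, spatial_axis=-1),
            RandCuCIM(name="rand_image_rotate_90", prob=prob, max_k=3, spatial_axis=(-2, -1)),
            CastToType(dtype=np.float32),
            RandCuCIM(name="rand_zoom", prob=prob, min_zoom=0.9, max_zoom=1.1),
        ]
    )

    # Apply the chain to one dummy CHW uint8 patch (a real CAMELYON patch in the tutorial).
    patch = np.random.randint(0, 256, size=(3, 224, 224), dtype=np.uint8)
    augmented = preprocess_gpu_train(patch)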
@@ -347,7 +348,7 @@ def main(cfg):
     # -------------------------------------------------------------------------
     # Training/Evaluating
     # -------------------------------------------------------------------------
-    train_counter = {"n_epochs": cfg["n_epochs"], "epoch": 1, "step": 1}
+    train_counter = {"n_epochs": cfg["n_epochs"], "epoch": 0, "step": 0}

     total_valid_time, total_train_time = 0.0, 0.0
     t_start = time.perf_counter()

@@ -403,7 +404,7 @@ def main(cfg):
         writer.add_scalar("valid/accuracy", valid_acc, train_counter["epoch"])

         logging.info(
-            f"[Epoch: {train_counter['epoch']}/{cfg['n_epochs']}] loss: {valid_loss:.3f}, accuracy: {valid_acc:.2f}, "
+            f"[Epoch: {train_counter['epoch']}/{cfg['n_epochs']}] loss: {valid_loss:.3f}, accuracy: {valid_acc:.3f}, "
             f"time: {t_valid - t_epoch:.1f}s (train: {train_time:.1f}s, valid: {valid_time:.1f}s)"
         )
     else:

@@ -421,12 +422,12 @@ def main(cfg):
     # Save the best and final model
     if cfg["validate"] is True:
         copyfile(
-            os.path.join(log_dir, f"model_epoch_{metric_summary['best_epoch']}.pth"),
-            os.path.join(log_dir, "model_best.pth"),
+            os.path.join(log_dir, f"model_epoch_{metric_summary['best_epoch']}.pt"),
+            os.path.join(log_dir, "model_best.pt"),
         )
         copyfile(
-            os.path.join(log_dir, f"model_epoch_{cfg['n_epochs']}.pth"),
-            os.path.join(log_dir, "model_final.pth"),
+            os.path.join(log_dir, f"model_epoch_{cfg['n_epochs']}.pt"),
+            os.path.join(log_dir, "model_final.pt"),
         )

     # Final prints

@@ -478,7 +479,7 @@ def parse_arguments():
     parser.add_argument("--optimized", action="store_true", help="use optimized parameters")
     parser.add_argument("-b", "--backend", type=str, dest="backend", help="backend for transforms")

-    parser.add_argument("--cpu", type=int, default=10, dest="num_workers", help="number of workers")
+    parser.add_argument("--cpu", type=int, default=8, dest="num_workers", help="number of workers")
     parser.add_argument("--gpu", type=str, default="0", dest="gpu", help="which gpu to use")

     args = parser.parse_args()
