From d95b3d2e2a5542ca361da872a6b352722d4ae0af Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:00:46 -0400
Subject: [PATCH 1/8] add transform that can convert empty box to standard
 format; add verbose choice during training

Signed-off-by: Can Zhao
---
 detection/README.md              |  8 +++
 detection/generate_transforms.py | 21 ++-----
 detection/luna16_training.py     | 98 ++++++++++++++++++++++++--------
 3 files changed, 89 insertions(+), 38 deletions(-)

diff --git a/detection/README.md b/detection/README.md
index d4e7da35b5..d41946b0e4 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -108,6 +108,14 @@ python3 luna16_testing.py \
     -c ./config/config_train_luna16_16g.json
 ```
 
+If you are tuning hyper-parameters, please add the "-v" flag, e.g.
+```bash
+python3 luna16_testing.py \
+    -e ./config/environment_luna16_fold${i}.json \
+    -c ./config/config_train_luna16_16g.json -v
+```
+Details about matched anchors during training will be printed out.
+
 #### [3.4 LUNA16 Detection Evaluation](./run_luna16_offical_eval.sh)
 
 Please download the official LUNA16 evaluation scripts from https://luna16.grand-challenge.org/Evaluation/, and save it as ./evaluation_luna16. Note that the official LUNA16 evaluation scripts are based on python2.

diff --git a/detection/generate_transforms.py b/detection/generate_transforms.py
index 32c5ffd22e..6d98a18e09 100644
--- a/detection/generate_transforms.py
+++ b/detection/generate_transforms.py
@@ -1,14 +1,3 @@
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
 import torch
 import numpy as np
 from monai.transforms import (
@@ -37,6 +26,7 @@
     RandRotateBox90d,
     RandZoomBoxd,
     ConvertBoxModed,
+    StandardizeEmptyBoxd
 )
@@ -70,7 +60,6 @@ def generate_detection_train_transform(
     Return:
         training transform for detection
     """
-    amp = True
     if amp:
         compute_dtype = torch.float16
     else:
@@ -82,6 +71,7 @@ def generate_detection_train_transform(
             EnsureChannelFirstd(keys=[image_key]),
             EnsureTyped(keys=[image_key, box_key], dtype=torch.float32),
             EnsureTyped(keys=[label_key], dtype=torch.long),
+            StandardizeEmptyBoxd(box_keys=[box_key], box_ref_image_keys=image_key),
             Orientationd(keys=[image_key], axcodes="RAS"),
             intensity_transform,
             EnsureTyped(keys=[image_key], dtype=torch.float16),
@@ -216,7 +206,6 @@ def generate_detection_val_transform(
     Return:
         validation transform for detection
     """
-    amp = True
     if amp:
         compute_dtype = torch.float16
     else:
@@ -228,6 +217,7 @@ def generate_detection_val_transform(
             EnsureChannelFirstd(keys=[image_key]),
             EnsureTyped(keys=[image_key, box_key], dtype=torch.float32),
             EnsureTyped(keys=[label_key], dtype=torch.long),
+            StandardizeEmptyBoxd(box_keys=[box_key], box_ref_image_keys=image_key),
             Orientationd(keys=[image_key], axcodes="RAS"),
             intensity_transform,
             ConvertBoxToStandardModed(box_keys=[box_key], mode=gt_box_mode),
@@ -272,7 +262,6 @@ def generate_detection_inference_transform(
     Return:
         validation transform for detection
     """
-    amp = True
    if amp:
         compute_dtype = torch.float16
     else:
@@ -302,7 +291,9 @@ def generate_detection_inference_transform(
                 image_meta_key_postfix="meta_dict",
                 affine_lps_to_ras=affine_lps_to_ras,
             ),
-            ConvertBoxModed(box_keys=[pred_box_key], src_mode="xyzxyz", dst_mode=gt_box_mode),
+            ConvertBoxModed(
+                box_keys=[pred_box_key], src_mode="xyzxyz", dst_mode=gt_box_mode
+            ),
             DeleteItemsd(keys=[image_key]),
         ]
     )

diff --git a/detection/luna16_training.py b/detection/luna16_training.py
index bb11e5ceb4..fbf19d72e4 100644
--- a/detection/luna16_training.py
+++ b/detection/luna16_training.py
@@ -53,9 +53,16 @@ def main():
     )
     parser.add_argument(
         "-c",
-        "--config-file",
+        "--config-file",
         default="./config/config_train.json",
-        help="config json file that stores hyper-parameters",
+        help="config json file that stores hyper-parameters",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        default=False,
+        action="store_true",
+        help="whether to print verbose detail during training, recommended when you are not sure about hyper-parameters",
     )
     args = parser.parse_args()
@@ -176,7 +183,10 @@ def main():
         returned_layers=args.returned_layers,
     )
     num_anchors = anchor_generator.num_anchors_per_location()[0]
-    size_divisible = [s * 2 * 2 ** max(args.returned_layers) for s in feature_extractor.body.conv1.stride]
+    size_divisible = [
+        s * 2 * 2 ** max(args.returned_layers)
+        for s in feature_extractor.body.conv1.stride
+    ]
     net = torch.jit.script(
         RetinaNet(
             spatial_dims=args.spatial_dims,
@@ -188,7 +198,9 @@ def main():
     )
 
     # 3) build detector
-    detector = RetinaNetDetector(network=net, anchor_generator=anchor_generator, debug=False).to(device)
+    detector = RetinaNetDetector(
+        network=net, anchor_generator=anchor_generator, debug=args.verbose
+    ).to(device)
 
     # set training components
     detector.set_atss_matcher(num_candidates=4, center_in_gt=False)
@@ -224,8 +236,12 @@ def main():
         weight_decay=3e-5,
         nesterov=True,
     )
-    after_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1)
-    scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=after_scheduler)
+    after_scheduler = torch.optim.lr_scheduler.StepLR(
+        optimizer, step_size=150, gamma=0.1
+    )
+    scheduler_warmup = GradualWarmupScheduler(
+        optimizer, multiplier=1, total_epoch=10, after_scheduler=after_scheduler
+    )
     scaler = torch.cuda.amp.GradScaler() if amp else None
     optimizer.zero_grad()
     optimizer.step()
@@ -241,7 +257,9 @@ def main():
 
     max_epochs = 300
     epoch_len = len(train_ds) // train_loader.batch_size
-    w_cls = config_dict.get("w_cls", 1.0)  # weight between classification loss and box regression loss, default 1.0
+    w_cls = config_dict.get(
+        "w_cls", 1.0
+    )  # weight between classification loss and box regression loss, default 1.0
     for epoch in range(max_epochs):
         # ------------- Training -------------
         print("-" * 10)
@@ -257,7 +275,9 @@ def main():
         for batch_data in train_loader:
             step += 1
             inputs = [
-                batch_data_ii["image"].to(device) for batch_data_i in batch_data for batch_data_ii in batch_data_i
+                batch_data_ii["image"].to(device)
+                for batch_data_i in batch_data
+                for batch_data_ii in batch_data_i
             ]
             targets = [
                 dict(
@@ -274,7 +294,10 @@ def main():
             if amp and (scaler is not None):
                 with torch.cuda.amp.autocast():
                     outputs = detector(inputs, targets)
-                    loss = w_cls * outputs[detector.cls_key] + outputs[detector.box_reg_key]
+                    loss = (
+                        w_cls * outputs[detector.cls_key]
+                        + outputs[detector.box_reg_key]
+                    )
                 scaler.scale(loss).backward()
                 scaler.step(optimizer)
                 scaler.update()
@@ -289,7 +312,9 @@ def main():
             epoch_cls_loss += outputs[detector.cls_key].detach().item()
             epoch_box_reg_loss += outputs[detector.box_reg_key].detach().item()
             print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
-            tensorboard_writer.add_scalar("train_loss", loss.detach().item(), epoch_len * epoch + step)
+            tensorboard_writer.add_scalar(
+                "train_loss", loss.detach().item(), epoch_len * epoch + step
+            )
 
         end_time = time.time()
         print(f"Training time: {end_time-start_time}s")
@@ -304,8 +329,12 @@ def main():
         print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
         tensorboard_writer.add_scalar("avg_train_loss", epoch_loss, epoch + 1)
         tensorboard_writer.add_scalar("avg_train_cls_loss", epoch_cls_loss, epoch + 1)
-        tensorboard_writer.add_scalar("avg_train_box_reg_loss", epoch_box_reg_loss, epoch + 1)
-        tensorboard_writer.add_scalar("train_lr", optimizer.param_groups[0]["lr"], epoch + 1)
+        tensorboard_writer.add_scalar(
+            "avg_train_box_reg_loss", epoch_box_reg_loss, epoch + 1
+        )
+        tensorboard_writer.add_scalar(
+            "train_lr", optimizer.param_groups[0]["lr"], epoch + 1
+        )
 
         # save last trained model
         torch.jit.save(detector.network, env_dict["model_path"][:-3] + "_last.pt")
@@ -322,9 +351,15 @@ def main():
                     # if all val_data_i["image"] smaller than args.val_patch_size, no need to use inferer
                     # otherwise, need inferer to handle large input images.
                     use_inferer = not all(
-                        [val_data_i["image"][0, ...].numel() < np.prod(args.val_patch_size) for val_data_i in val_data]
+                        [
+                            val_data_i["image"][0, ...].numel()
+                            < np.prod(args.val_patch_size)
+                            for val_data_i in val_data
+                        ]
                     )
-                    val_inputs = [val_data_i.pop("image").to(device) for val_data_i in val_data]
+                    val_inputs = [
+                        val_data_i.pop("image").to(device) for val_data_i in val_data
+                    ]
 
                 if amp:
                     with torch.cuda.amp.autocast():
@@ -343,9 +378,14 @@ def main():
                 draw_img = visualize_one_xy_slice_in_3d_image(
                     gt_boxes=val_data[0]["box"].cpu().detach().numpy(),
                     image=val_inputs[0][0, ...].cpu().detach().numpy(),
-                    pred_boxes=val_outputs[0][detector.target_box_key].cpu().detach().numpy(),
+                    pred_boxes=val_outputs[0][detector.target_box_key]
+                    .cpu()
+                    .detach()
+                    .numpy(),
+                )
+                tensorboard_writer.add_image(
+                    "val_img_xy", draw_img.transpose([2, 1, 0]), epoch + 1
                 )
-                tensorboard_writer.add_image("val_img_xy", draw_img.transpose([2, 1, 0]), epoch + 1)
 
                 # compute metrics
                 del val_inputs
@@ -354,17 +394,24 @@ def main():
                 iou_fn=box_utils.box_iou,
                 iou_thresholds=coco_metric.iou_thresholds,
                 pred_boxes=[
-                    val_data_i[detector.target_box_key].cpu().detach().numpy() for val_data_i in val_outputs_all
+                    val_data_i[detector.target_box_key].cpu().detach().numpy()
+                    for val_data_i in val_outputs_all
                 ],
                 pred_classes=[
-                    val_data_i[detector.target_label_key].cpu().detach().numpy() for val_data_i in val_outputs_all
+                    val_data_i[detector.target_label_key].cpu().detach().numpy()
+                    for val_data_i in val_outputs_all
                 ],
                 pred_scores=[
-                    val_data_i[detector.pred_score_key].cpu().detach().numpy() for val_data_i in val_outputs_all
+                    val_data_i[detector.pred_score_key].cpu().detach().numpy()
+                    for val_data_i in val_outputs_all
+                ],
+                gt_boxes=[
+                    val_data_i[detector.target_box_key].cpu().detach().numpy()
+                    for val_data_i in val_targets_all
                 ],
-                gt_boxes=[val_data_i[detector.target_box_key].cpu().detach().numpy() for val_data_i in val_targets_all],
                 gt_classes=[
-                    val_data_i[detector.target_label_key].cpu().detach().numpy() for val_data_i in val_targets_all
+                    val_data_i[detector.target_label_key].cpu().detach().numpy()
+                    for val_data_i in val_targets_all
                 ],
             )
             val_epoch_metric_dict = coco_metric(results_metric)[0]
@@ -372,7 +419,9 @@ def main():
 
             # write to tensorboard event
             for k in val_epoch_metric_dict.keys():
-                tensorboard_writer.add_scalar("val_" + k, val_epoch_metric_dict[k], epoch + 1)
+                tensorboard_writer.add_scalar(
+                    "val_" + k, val_epoch_metric_dict[k], epoch + 1
+                )
             val_epoch_metric = val_epoch_metric_dict.values()
             val_epoch_metric = sum(val_epoch_metric) / len(val_epoch_metric)
             tensorboard_writer.add_scalar("val_metric", val_epoch_metric, epoch + 1)
@@ -390,7 +439,10 @@ def main():
             )
         )
 
-    print(f"train completed, best_metric: {best_val_epoch_metric:.4f} " f"at epoch: {best_val_epoch}")
+    print(
+        f"train completed, best_metric: {best_val_epoch_metric:.4f} "
+        f"at epoch: {best_val_epoch}"
+    )
     tensorboard_writer.close()

From c543d8cb2c09e79c675d0bd6ce926a49a84a167c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 18 Mar 2023 10:02:53 +0000
Subject: [PATCH 2/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 detection/generate_transforms.py |  6 +--
 detection/luna16_training.py     | 93 +++++++++----------------------
 2 files changed, 26 insertions(+), 73 deletions(-)

diff --git a/detection/generate_transforms.py b/detection/generate_transforms.py
index 6d98a18e09..79e1638a40 100644
--- a/detection/generate_transforms.py
+++ b/detection/generate_transforms.py
@@ -26,7 +26,7 @@
     RandRotateBox90d,
     RandZoomBoxd,
     ConvertBoxModed,
-    StandardizeEmptyBoxd
+    StandardizeEmptyBoxd,
 )
@@ -291,9 +291,7 @@ def generate_detection_inference_transform(
                 image_meta_key_postfix="meta_dict",
                 affine_lps_to_ras=affine_lps_to_ras,
             ),
-            ConvertBoxModed(
-                box_keys=[pred_box_key], src_mode="xyzxyz", dst_mode=gt_box_mode
-            ),
+            ConvertBoxModed(box_keys=[pred_box_key], src_mode="xyzxyz", dst_mode=gt_box_mode),
             DeleteItemsd(keys=[image_key]),
         ]
     )

diff --git a/detection/luna16_training.py b/detection/luna16_training.py
index fbf19d72e4..9897697dac 100644
--- a/detection/luna16_training.py
+++ b/detection/luna16_training.py
@@ -53,14 +53,14 @@ def main():
     )
     parser.add_argument(
         "-c",
-        "--config-file",
+        "--config-file",
         default="./config/config_train.json",
-        help="config json file that stores hyper-parameters",
+        help="config json file that stores hyper-parameters",
     )
     parser.add_argument(
         "-v",
         "--verbose",
-        default=False,
+        default=False,
         action="store_true",
         help="whether to print verbose detail during training, recommended when you are not sure about hyper-parameters",
     )
@@ -183,10 +183,7 @@ def main():
         returned_layers=args.returned_layers,
     )
     num_anchors = anchor_generator.num_anchors_per_location()[0]
-    size_divisible = [
-        s * 2 * 2 ** max(args.returned_layers)
-        for s in feature_extractor.body.conv1.stride
-    ]
+    size_divisible = [s * 2 * 2 ** max(args.returned_layers) for s in feature_extractor.body.conv1.stride]
     net = torch.jit.script(
         RetinaNet(
             spatial_dims=args.spatial_dims,
@@ -198,9 +195,7 @@ def main():
     )
 
     # 3) build detector
-    detector = RetinaNetDetector(
-        network=net, anchor_generator=anchor_generator, debug=args.verbose
-    ).to(device)
+    detector = RetinaNetDetector(network=net, anchor_generator=anchor_generator, debug=args.verbose).to(device)
 
     # set training components
     detector.set_atss_matcher(num_candidates=4, center_in_gt=False)
@@ -236,12 +231,8 @@ def main():
         weight_decay=3e-5,
         nesterov=True,
     )
-    after_scheduler = torch.optim.lr_scheduler.StepLR(
-        optimizer, step_size=150, gamma=0.1
-    )
-    scheduler_warmup = GradualWarmupScheduler(
-        optimizer, multiplier=1, total_epoch=10, after_scheduler=after_scheduler
-    )
+    after_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1)
+    scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=after_scheduler)
     scaler = torch.cuda.amp.GradScaler() if amp else None
     optimizer.zero_grad()
     optimizer.step()
@@ -257,9 +248,7 @@ def main():
 
     max_epochs = 300
     epoch_len = len(train_ds) // train_loader.batch_size
-    w_cls = config_dict.get(
-        "w_cls", 1.0
-    )  # weight between classification loss and box regression loss, default 1.0
+    w_cls = config_dict.get("w_cls", 1.0)  # weight between classification loss and box regression loss, default 1.0
     for epoch in range(max_epochs):
         # ------------- Training -------------
         print("-" * 10)
@@ -275,9 +264,7 @@ def main():
         for batch_data in train_loader:
             step += 1
             inputs = [
-                batch_data_ii["image"].to(device)
-                for batch_data_i in batch_data
-                for batch_data_ii in batch_data_i
+                batch_data_ii["image"].to(device) for batch_data_i in batch_data for batch_data_ii in batch_data_i
             ]
             targets = [
                 dict(
@@ -294,10 +281,7 @@ def main():
             if amp and (scaler is not None):
                 with torch.cuda.amp.autocast():
                     outputs = detector(inputs, targets)
-                    loss = (
-                        w_cls * outputs[detector.cls_key]
-                        + outputs[detector.box_reg_key]
-                    )
+                    loss = w_cls * outputs[detector.cls_key] + outputs[detector.box_reg_key]
                 scaler.scale(loss).backward()
                 scaler.step(optimizer)
                 scaler.update()
@@ -312,9 +296,7 @@ def main():
             epoch_cls_loss += outputs[detector.cls_key].detach().item()
             epoch_box_reg_loss += outputs[detector.box_reg_key].detach().item()
             print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
-            tensorboard_writer.add_scalar(
-                "train_loss", loss.detach().item(), epoch_len * epoch + step
-            )
+            tensorboard_writer.add_scalar("train_loss", loss.detach().item(), epoch_len * epoch + step)
 
         end_time = time.time()
         print(f"Training time: {end_time-start_time}s")
@@ -329,12 +311,8 @@ def main():
         print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
         tensorboard_writer.add_scalar("avg_train_loss", epoch_loss, epoch + 1)
         tensorboard_writer.add_scalar("avg_train_cls_loss", epoch_cls_loss, epoch + 1)
-        tensorboard_writer.add_scalar(
-            "avg_train_box_reg_loss", epoch_box_reg_loss, epoch + 1
-        )
-        tensorboard_writer.add_scalar(
-            "train_lr", optimizer.param_groups[0]["lr"], epoch + 1
-        )
+        tensorboard_writer.add_scalar("avg_train_box_reg_loss", epoch_box_reg_loss, epoch + 1)
+        tensorboard_writer.add_scalar("train_lr", optimizer.param_groups[0]["lr"], epoch + 1)
 
         # save last trained model
         torch.jit.save(detector.network, env_dict["model_path"][:-3] + "_last.pt")
@@ -351,15 +329,9 @@ def main():
                     # if all val_data_i["image"] smaller than args.val_patch_size, no need to use inferer
                     # otherwise, need inferer to handle large input images.
                     use_inferer = not all(
-                        [
-                            val_data_i["image"][0, ...].numel()
-                            < np.prod(args.val_patch_size)
-                            for val_data_i in val_data
-                        ]
+                        [val_data_i["image"][0, ...].numel() < np.prod(args.val_patch_size) for val_data_i in val_data]
                     )
-                    val_inputs = [
-                        val_data_i.pop("image").to(device) for val_data_i in val_data
-                    ]
+                    val_inputs = [val_data_i.pop("image").to(device) for val_data_i in val_data]
 
                 if amp:
                     with torch.cuda.amp.autocast():
@@ -378,14 +350,9 @@ def main():
                 draw_img = visualize_one_xy_slice_in_3d_image(
                     gt_boxes=val_data[0]["box"].cpu().detach().numpy(),
                     image=val_inputs[0][0, ...].cpu().detach().numpy(),
-                    pred_boxes=val_outputs[0][detector.target_box_key]
-                    .cpu()
-                    .detach()
-                    .numpy(),
-                )
-                tensorboard_writer.add_image(
-                    "val_img_xy", draw_img.transpose([2, 1, 0]), epoch + 1
+                    pred_boxes=val_outputs[0][detector.target_box_key].cpu().detach().numpy(),
                 )
+                tensorboard_writer.add_image("val_img_xy", draw_img.transpose([2, 1, 0]), epoch + 1)
 
                 # compute metrics
                 del val_inputs
@@ -394,24 +361,17 @@ def main():
                 iou_fn=box_utils.box_iou,
                 iou_thresholds=coco_metric.iou_thresholds,
                 pred_boxes=[
-                    val_data_i[detector.target_box_key].cpu().detach().numpy()
-                    for val_data_i in val_outputs_all
+                    val_data_i[detector.target_box_key].cpu().detach().numpy() for val_data_i in val_outputs_all
                 ],
                 pred_classes=[
-                    val_data_i[detector.target_label_key].cpu().detach().numpy()
-                    for val_data_i in val_outputs_all
+                    val_data_i[detector.target_label_key].cpu().detach().numpy() for val_data_i in val_outputs_all
                 ],
                 pred_scores=[
-                    val_data_i[detector.pred_score_key].cpu().detach().numpy()
-                    for val_data_i in val_outputs_all
-                ],
-                gt_boxes=[
-                    val_data_i[detector.target_box_key].cpu().detach().numpy()
-                    for val_data_i in val_targets_all
+                    val_data_i[detector.pred_score_key].cpu().detach().numpy() for val_data_i in val_outputs_all
                 ],
+                gt_boxes=[val_data_i[detector.target_box_key].cpu().detach().numpy() for val_data_i in val_targets_all],
                 gt_classes=[
-                    val_data_i[detector.target_label_key].cpu().detach().numpy()
-                    for val_data_i in val_targets_all
+                    val_data_i[detector.target_label_key].cpu().detach().numpy() for val_data_i in val_targets_all
                 ],
             )
             val_epoch_metric_dict = coco_metric(results_metric)[0]
@@ -419,9 +379,7 @@ def main():
 
             # write to tensorboard event
             for k in val_epoch_metric_dict.keys():
-                tensorboard_writer.add_scalar(
-                    "val_" + k, val_epoch_metric_dict[k], epoch + 1
-                )
+                tensorboard_writer.add_scalar("val_" + k, val_epoch_metric_dict[k], epoch + 1)
             val_epoch_metric = val_epoch_metric_dict.values()
             val_epoch_metric = sum(val_epoch_metric) / len(val_epoch_metric)
             tensorboard_writer.add_scalar("val_metric", val_epoch_metric, epoch + 1)
@@ -439,10 +397,7 @@ def main():
             )
         )
 
-    print(
-        f"train completed, best_metric: {best_val_epoch_metric:.4f} "
-        f"at epoch: {best_val_epoch}"
-    )
+    print(f"train completed, best_metric: {best_val_epoch_metric:.4f} " f"at epoch: {best_val_epoch}")
     tensorboard_writer.close()

From 000c10463874b8a36316894721f7d82dd347e2e2 Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:04:23 -0400
Subject: [PATCH 3/8] typo

Signed-off-by: Can Zhao
---
 detection/README.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/detection/README.md b/detection/README.md
index d41946b0e4..eab3af84d9 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -89,6 +89,14 @@ python3 luna16_training.py \
     -c ./config/config_train_luna16_16g.json
 ```
 
+If you are tuning hyper-parameters, please add the "-v" flag, e.g.
+```bash
+python3 luna16_training.py \
+    -e ./config/environment_luna16_fold${i}.json \
+    -c ./config/config_train_luna16_16g.json -v
+```
+Details about matched anchors during training will be printed out.
+
 For each fold, 95% of the training data is used for training, while the rest 5% is used for validation and model selection.
 
 The training and validation curves for 300 epochs of 10 folds are shown below. The upper row shows the training losses for box regression and classification. The bottom row shows the validation mAP and mAR for IoU ranging from 0.1 to 0.5.
@@ -108,14 +116,6 @@ python3 luna16_testing.py \
     -c ./config/config_train_luna16_16g.json
 ```
 
-If you are tuning hyper-parameters, please add the "-v" flag, e.g.
-```bash
-python3 luna16_testing.py \
-    -e ./config/environment_luna16_fold${i}.json \
-    -c ./config/config_train_luna16_16g.json -v
-```
-Details about matched anchors during training will be printed out.
-
 #### [3.4 LUNA16 Detection Evaluation](./run_luna16_offical_eval.sh)
 
 Please download the official LUNA16 evaluation scripts from https://luna16.grand-challenge.org/Evaluation/, and save it as ./evaluation_luna16. Note that the official LUNA16 evaluation scripts are based on python2.

From fd85864995e4aaecd3f9fbbb941b994ce1a15037 Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:05:37 -0400
Subject: [PATCH 4/8] typo

Signed-off-by: Can Zhao
---
 detection/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/detection/README.md b/detection/README.md
index eab3af84d9..875ea6336c 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -92,7 +92,7 @@ If you are tuning hyper-parameters, please add the "-v" flag, e.g.
 ```bash
 python3 luna16_training.py \
-    -e ./config/environment_luna16_fold${i}.json \
+    -e ./config/environment_luna16_fold0.json \
     -c ./config/config_train_luna16_16g.json -v
 ```
 Details about matched anchors during training will be printed out.

From a9e95b1cb66d0d75c57750000ff8991a63f90e7c Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:07:11 -0400
Subject: [PATCH 5/8] readme

Signed-off-by: Can Zhao
---
 detection/README.md | 8 +------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/detection/README.md b/detection/README.md
index 875ea6336c..c8a51d5190 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -88,13 +88,7 @@ python3 luna16_training.py \
     -e ./config/environment_luna16_fold${i}.json \
     -c ./config/config_train_luna16_16g.json
 ```
-
-If you are tuning hyper-parameters, please add the "-v" flag, e.g.
-```bash
-python3 luna16_training.py \
-    -e ./config/environment_luna16_fold0.json \
-    -c ./config/config_train_luna16_16g.json -v
-```
+If you are tuning hyper-parameters, please also add the `-v` flag.
 Details about matched anchors during training will be printed out.

From c5381376c1dcc92c46f521c8e6106f2c7be9ee09 Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:07:44 -0400
Subject: [PATCH 6/8] readme

Signed-off-by: Can Zhao
---
 detection/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/detection/README.md b/detection/README.md
index c8a51d5190..a5314d9bad 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -88,7 +88,7 @@ python3 luna16_training.py \
     -e ./config/environment_luna16_fold${i}.json \
     -c ./config/config_train_luna16_16g.json
 ```
-If you are tuning hyper-parameters, please also add the `-v` flag.
+If you are tuning hyper-parameters, please also add the `--verbose` flag. 
 Details about matched anchors during training will be printed out.
 
 For each fold, 95% of the training data is used for training, while the rest 5% is used for validation and model selection.
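Patches 1-6 together wire a verbose switch from the command line through to the detector's debug mode. The following is a minimal, self-contained sketch of that wiring; it is not part of the patches, and the standalone parser below is illustrative only:

```python
# Sketch of the --verbose wiring introduced in PATCH 1/8; the parser mirrors
# the argparse block added to luna16_training.py, but runs standalone here.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "-v",
    "--verbose",
    default=False,
    action="store_true",
    help="print details about matched anchors during training",
)

args = parser.parse_args(["-v"])  # simulates `python3 luna16_training.py ... -v`
assert args.verbose is True

# In the patched script the flag becomes the detector's debug switch:
#     detector = RetinaNetDetector(
#         network=net, anchor_generator=anchor_generator, debug=args.verbose
#     ).to(device)
# With debug enabled, the detector reports anchor-matching details during
# training, which is the output the README tells hyper-parameter tuners to watch.
```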
From 3dc449976978cc5d152b897514f9a318bae23810 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 18 Mar 2023 10:08:26 +0000
Subject: [PATCH 7/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 detection/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/detection/README.md b/detection/README.md
index a5314d9bad..80661e3873 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -88,7 +88,7 @@ python3 luna16_training.py \
     -e ./config/environment_luna16_fold${i}.json \
     -c ./config/config_train_luna16_16g.json
 ```
-If you are tuning hyper-parameters, please also add the `--verbose` flag. 
+If you are tuning hyper-parameters, please also add the `--verbose` flag.
 Details about matched anchors during training will be printed out.
 
 For each fold, 95% of the training data is used for training, while the rest 5% is used for validation and model selection.

From 8b4085f2cfb77c46da57d71ff144dd19309bc701 Mon Sep 17 00:00:00 2001
From: Can Zhao
Date: Sat, 18 Mar 2023 06:08:58 -0400
Subject: [PATCH 8/8] copyright

Signed-off-by: Can Zhao
---
 detection/generate_transforms.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/detection/generate_transforms.py b/detection/generate_transforms.py
index 79e1638a40..b255532742 100644
--- a/detection/generate_transforms.py
+++ b/detection/generate_transforms.py
@@ -1,3 +1,14 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import torch
 import numpy as np
 from monai.transforms import (
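The transform at the center of PATCH 1/8, `StandardizeEmptyBoxd`, is inserted right after the `EnsureTyped` steps in both the training and validation chains. Below is a hedged sketch of its effect on a box-free sample; it assumes a MONAI release that ships the transform (MONAI's detection box transforms live under `monai.apps.detection.transforms.dictionary`), and the shapes are illustrative:

```python
# Hedged sketch: what StandardizeEmptyBoxd does to an empty box array.
# Assumes a MONAI version providing this transform; the keyword arguments
# match the call added to generate_transforms.py in this patch series.
import numpy as np
from monai.apps.detection.transforms.dictionary import StandardizeEmptyBoxd

sample = {
    "image": np.zeros((1, 192, 192, 80), dtype=np.float32),  # channel-first 3D image
    "box": np.zeros((0,), dtype=np.float32),  # image with no ground-truth boxes
}

# Empty boxes can arrive with ambiguous shapes such as (0,); the transform
# rewrites them to (0, 2 * spatial_dims), inferring spatial_dims from the
# reference image, so downstream box-mode conversions and the ATSS matcher
# see a consistent standard format.
standardize = StandardizeEmptyBoxd(box_keys=["box"], box_ref_image_keys="image")
out = standardize(sample)
print(out["box"].shape)  # expected: (0, 6) for a 3D reference image
```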