diff --git a/generation/maisi/README.md b/generation/maisi/README.md index 6709e4a84..1d0a9b5d2 100644 --- a/generation/maisi/README.md +++ b/generation/maisi/README.md @@ -54,20 +54,18 @@ MAISI is based on the following papers: Network definition is stored in [./configs/config_maisi.json](./configs/config_maisi.json). Training and inference should use the same [./configs/config_maisi.json](./configs/config_maisi.json). ### 2. Model Inference +#### Inference parameters: The information for the inference input, like body region and anatomy to generate, is stored in [./configs/config_infer.json](./configs/config_infer.json). Please feel free to play with it. Here are the details of the parameters. - `"num_output_samples"`: int, the number of output image/mask pairs it will generate. - `"spacing"`: voxel size of generated images. E.g., if set to `[1.5, 1.5, 2.0]`, it will generate images with a resolution of 1.5x1.5x2.0 mm. -- `"output_size"`: volume size of generated images. E.g., if set to `[512, 512, 256]`, it will generate images with size of 512x512x256. They need to be divisible by 16. If you have a small GPU memory size, you should adjust it to small numbers. +- `"output_size"`: volume size of generated images. E.g., if set to `[512, 512, 256]`, it will generate images of size 512x512x256. Each dimension needs to be divisible by 16. If you have limited GPU memory, you should adjust it to smaller numbers. Note that `"spacing"` and `"output_size"` together decide the output field of view (FOV); see the sketch after this list. For example, if they are set to `[1.5, 1.5, 2.0]` mm and `[512, 512, 256]`, the FOV is 768x768x512 mm. We recommend an FOV of at least 256 mm in the x- and y-axes for the head, and at least 384 mm for other body regions like the abdomen. There is no such restriction for the z-axis. - `"controllable_anatomy_size"`: a list of controllable anatomy and its size scale (0--1). E.g., if set to `[["liver", 0.5],["hepatic tumor", 0.3]]`, the generated image will contain liver that have a median size, with size around 50% percentile, and hepatic tumor that is relatively small, with around 30% percentile. The output will contain paired image and segmentation mask for the controllable anatomy. - `"body_region"`: If "controllable_anatomy_size" is not specified, "body_region" will be used to constrain the region of generated images. It needs to be chosen from "head", "chest", "thorax", "abdomen", "pelvis", "lower". - `"anatomy_list"`: If "controllable_anatomy_size" is not specified, the output will contain paired image and segmentation mask for the anatomy in "./configs/label_dict.json". - `"autoencoder_sliding_window_infer_size"`: in order to save GPU memory, we use sliding window inference when decoding latents to image when `"output_size"` is large. This is the patch size of the sliding window. Small value will reduce GPU memory but increase time cost. They need to be divisible by 16. - `"autoencoder_sliding_window_infer_overlap"`: float between 0 and 1. Large value will reduce the stitching artifacts when stitching patches during sliding window inference, but increase time cost. If you do not observe seam lines in the generated image result, you can use a smaller value to save inference time. - -Please refer to [maisi_inference_tutorial.ipynb](maisi_inference_tutorial.ipynb) for the tutorial for MAISI model inference.
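To make the FOV arithmetic above concrete, here is a minimal Python sketch (illustrative only, not code from this PR) of how `"spacing"` and `"output_size"` determine the FOV and the checks described above:

```python
# Illustrative sketch: "spacing" and "output_size" jointly determine the FOV.
spacing = [1.5, 1.5, 2.0]      # voxel size in mm
output_size = [512, 512, 256]  # volume size in voxels

# Each output dimension must be divisible by 16.
assert all(dim % 16 == 0 for dim in output_size)

# FOV per axis = number of voxels * voxel size (mm).
fov = [dim * vox for dim, vox in zip(output_size, spacing)]
print(fov)  # [768.0, 768.0, 512.0]

# Recommended minimum in-plane FOV: 256 mm for head, 384 mm for other regions.
if min(fov[0], fov[1]) < 256:
    print("Warning: in-plane FOV is below the recommended minimum even for head.")
```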
- #### Execute Inference: To run the inference script, please run: ```bash @@ -75,6 +73,11 @@ export MONAI_DATA_DIRECTORY= python -m scripts.inference -c ./configs/config_maisi.json -i ./configs/config_infer.json -e ./configs/environment.json --random-seed 0 ``` +Please refer to [maisi_inference_tutorial.ipynb](maisi_inference_tutorial.ipynb) for a tutorial on MAISI model inference. + +#### Quality Check: +We have implemented a quality check function for the generated CT images. The main idea behind this function is to ensure that the Hounsfield unit (HU) intensities for each organ in the CT images remain within a defined range. For each training image used in the Diffusion network, we computed the median value for a few major organs. We then summarize the statistics of these median values and save them to [./configs/image_median_statistics.json](./configs/image_median_statistics.json). During inference, for each generated image, we compute the median HU values for the major organs and check whether they fall within the normal range. + ### 3. Model Training Training data preparation can be found in [./data/README.md](./data/README.md) diff --git a/generation/maisi/configs/image_median_statistics.json b/generation/maisi/configs/image_median_statistics.json new file mode 100644 index 000000000..df9665386 --- /dev/null +++ b/generation/maisi/configs/image_median_statistics.json @@ -0,0 +1,72 @@ +{ + "liver": { + "min_median": -14.0, + "max_median": 1000.0, + "percentile_0_5": 9.530000000000001, + "percentile_99_5": 162.0, + "sigma_6_low": -21.596463547885904, + "sigma_6_high": 156.27881534763367, + "sigma_12_low": -110.53410299564568, + "sigma_12_high": 245.21645479539342 + }, + "spleen": { + "min_median": -69.0, + "max_median": 1000.0, + "percentile_0_5": 16.925000000000004, + "percentile_99_5": 184.07500000000073, + "sigma_6_low": -43.133891656525165, + "sigma_6_high": 177.40494997185993, + "sigma_12_low": -153.4033124707177, + "sigma_12_high": 287.6743707860525 + }, + "pancreas": { + "min_median": -124.0, + "max_median": 1000.0, + "percentile_0_5": -29.0, + "percentile_99_5": 145.92000000000007, + "sigma_6_low": -56.59382515620725, + "sigma_6_high": 149.50627399318438, + "sigma_12_low": -159.64387473090306, + "sigma_12_high": 252.5563235678802 + }, + "kidney": { + "min_median": -165.5, + "max_median": 819.0, + "percentile_0_5": -40.0, + "percentile_99_5": 254.61999999999898, + "sigma_6_low": -130.56375604853028, + "sigma_6_high": 267.28163511081016, + "sigma_12_low": -329.4864516282005, + "sigma_12_high": 466.20433069048045 + }, + "lung": { + "min_median": -1000.0, + "max_median": 65.0, + "percentile_0_5": -937.0, + "percentile_99_5": -366.9500000000007, + "sigma_6_low": -1088.5583843889117, + "sigma_6_high": -551.8503346949108, + "sigma_12_low": -1356.912409235912, + "sigma_12_high": -283.4963098479103 + }, + "bone": { + "min_median": 77.5, + "max_median": 1000.0, + "percentile_0_5": 136.45499999999998, + "percentile_99_5": 551.6350000000002, + "sigma_6_low": 71.39901958080469, + "sigma_6_high": 471.9957615639765, + "sigma_12_low": -128.8993514107812, + "sigma_12_high": 672.2941325555623 + }, + "brain": { + "min_median": -1000.0, + "max_median": 238.0, + "percentile_0_5": -951.0, + "percentile_99_5": 126.25, + "sigma_6_low": -304.8208236135867, + "sigma_6_high": 369.5118535139189, + "sigma_12_low": -641.9871621773394, + "sigma_12_high": 706.6781920776717 + } +} diff --git a/generation/maisi/scripts/diff_model_infer.py b/generation/maisi/scripts/diff_model_infer.py index
93dbf8c22..2de5faa25 100644 --- a/generation/maisi/scripts/diff_model_infer.py +++ b/generation/maisi/scripts/diff_model_infer.py @@ -27,7 +27,7 @@ from .diff_model_setting import initialize_distributed, load_config, setup_logging from .sample import ReconModel -from .utils import define_instance, load_autoencoder_ckpt +from .utils import define_instance def set_random_seed(seed: int) -> int: diff --git a/generation/maisi/scripts/infer_controlnet.py b/generation/maisi/scripts/infer_controlnet.py index 6931c31e5..cb4d3c9fc 100644 --- a/generation/maisi/scripts/infer_controlnet.py +++ b/generation/maisi/scripts/infer_controlnet.py @@ -24,7 +24,7 @@ from monai.utils import RankFilter from .sample import ldm_conditional_sample_one_image -from .utils import define_instance, load_autoencoder_ckpt, prepare_maisi_controlnet_json_dataloader, setup_ddp +from .utils import define_instance, prepare_maisi_controlnet_json_dataloader, setup_ddp @torch.inference_mode() diff --git a/generation/maisi/scripts/quality_check.py b/generation/maisi/scripts/quality_check.py new file mode 100644 index 000000000..223732761 --- /dev/null +++ b/generation/maisi/scripts/quality_check.py @@ -0,0 +1,147 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import nibabel as nib +import numpy as np + + +def get_masked_data(label_data, image_data, labels): + """ + Extracts and returns the image data corresponding to specified labels within a 3D volume. + + This function efficiently masks the `image_data` array based on the provided `labels` in the `label_data` array. + It handles both large and small numbers of labels, choosing the masking strategy accordingly. + + Args: + label_data (np.ndarray): A NumPy array containing label data, representing different anatomical + regions or classes in a 3D medical image. + image_data (np.ndarray): A NumPy array containing the image data from which the relevant regions + will be extracted. + labels (list of int): A list of integers representing the label values to be used for masking. + + Returns: + np.ndarray: A NumPy array containing the elements of `image_data` that correspond to the specified + labels in `label_data`. If no labels are provided, an empty array is returned. + + Raises: + ValueError: If `image_data` and `label_data` do not have the same shape. + + Example: + label_int_dict = {"liver": [1], "kidney": [5, 14]} + masked_data = get_masked_data(label_data, image_data, label_int_dict["kidney"]) + """ + + # Check if the shapes of image_data and label_data match + if image_data.shape != label_data.shape: + raise ValueError( + f"Shape mismatch: image_data has shape {image_data.shape}, " + f"but label_data has shape {label_data.shape}. They must be the same."
+ ) + + if not labels: + return np.array([]) # Return an empty array if no labels are provided + + labels = list(set(labels)) # remove duplicate items + + # Optimize performance based on the number of labels + num_label_acceleration_thresh = 3 + if len(labels) >= num_label_acceleration_thresh: + # if many labels, np.isin is faster + mask = np.isin(label_data, labels) + else: + # Use logical OR to combine masks if the number of labels is small + mask = np.zeros_like(label_data, dtype=bool) + for label in labels: + mask = np.logical_or(mask, label_data == label) + + # Retrieve the masked data (mask is already boolean) + masked_data = image_data[mask] + + return masked_data + + +def is_outlier(statistics, image_data, label_data, label_int_dict): + """ + Perform a quality check on the generated image by comparing its statistics with precomputed thresholds. + + Args: + statistics (dict): Dictionary containing precomputed statistics, including the mean +/- 3 sigma ("sigma_6") and mean +/- 6 sigma ("sigma_12") ranges. + image_data (np.ndarray): The image data to be checked, typically a 3D NumPy array. + label_data (np.ndarray): The label data corresponding to the image, used for masking regions of interest. + label_int_dict (dict): Dictionary mapping label names to their corresponding integer lists. + e.g., label_int_dict = {"liver": [1], "kidney": [5, 14]} + + Returns: + dict: A dictionary with labels as keys, each containing the quality check result, + including whether it's an outlier, the median value, and the thresholds used. + If no data is found for a label, the median value will be `None` and `is_outlier` will be `False`. + + Example: + # Example input data + statistics = { + "liver": { + "sigma_6_low": -21.596463547885904, + "sigma_6_high": 156.27881534763367 + }, + "kidney": { + "sigma_6_low": -15.0, + "sigma_6_high": 120.0 + } + } + label_int_dict = { + "liver": [1], + "kidney": [5, 14] + } + image_data = np.random.rand(100, 100, 100) # Replace with actual image data + label_data = np.zeros((100, 100, 100)) # Replace with actual label data + label_data[40:60, 40:60, 40:60] = 1 # Example region for liver + label_data[70:90, 70:90, 70:90] = 5 # Example region for kidney + result = is_outlier(statistics, image_data, label_data, label_int_dict) + """ + outlier_results = {} + + for label_name, stats in statistics.items(): + # Get the thresholds from the statistics + low_thresh = stats["sigma_6_low"] # or "sigma_12_low" depending on your needs + high_thresh = stats["sigma_6_high"] # or "sigma_12_high" depending on your needs + + # Retrieve the corresponding label integers + labels = label_int_dict.get(label_name, []) + masked_data = get_masked_data(label_data, image_data, labels) + masked_data = masked_data[~np.isnan(masked_data)] + + if masked_data.size == 0: + outlier_results[label_name] = { + "is_outlier": False, + "median_value": None, + "low_thresh": low_thresh, + "high_thresh": high_thresh, + } + continue + + # Compute the median of the masked region + median_value = np.nanmedian(masked_data) + + if np.isnan(median_value): + median_value = None + is_outlier = False + else: + # Determine if the median value is an outlier + is_outlier = median_value < low_thresh or median_value > high_thresh + + outlier_results[label_name] = { + "is_outlier": is_outlier, + "median_value": median_value, + "low_thresh": low_thresh, + "high_thresh": high_thresh, + } + + return outlier_results diff --git a/generation/maisi/scripts/sample.py b/generation/maisi/scripts/sample.py index 2257b341e..69a7869a4 100644 ---
a/generation/maisi/scripts/sample.py +++ b/generation/maisi/scripts/sample.py @@ -29,6 +29,7 @@ from .augmentation import augmentation from .find_masks import find_masks from .utils import binarize_labels, general_mask_generation_post_process, get_body_region_index_from_mask, remap_labels +from .quality_check import is_outlier class ReconModel(torch.nn.Module): @@ -181,7 +182,7 @@ def ldm_conditional_sample_one_image( noise_scheduler, scale_factor, device, - comebine_label_or, + combine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor, @@ -202,7 +203,7 @@ def ldm_conditional_sample_one_image( noise_scheduler: The noise scheduler for the diffusion process. scale_factor (float): Scaling factor for the latent space. device (torch.device): The device to run the computation on. - comebine_label_or (torch.Tensor): The combined label tensor. + combine_label_or (torch.Tensor): The combined label tensor. top_region_index_tensor (torch.Tensor): Tensor specifying the top region index. bottom_region_index_tensor (torch.Tensor): Tensor specifying the bottom region index. spacing_tensor (torch.Tensor): Tensor specifying the spacing. @@ -229,18 +230,18 @@ def ldm_conditional_sample_one_image( logging.info("---- Start generating latent features... ----") start_time = time.time() # generate segmentation mask - comebine_label = comebine_label_or.to(device) + combine_label = combine_label_or.to(device) if ( - output_size[0] != comebine_label.shape[2] - or output_size[1] != comebine_label.shape[3] - or output_size[2] != comebine_label.shape[4] + output_size[0] != combine_label.shape[2] + or output_size[1] != combine_label.shape[3] + or output_size[2] != combine_label.shape[4] ): logging.info( "output_size is not a desired value. Need to interpolate the mask to match with output_size. The result image will be very low quality." ) - comebine_label = torch.nn.functional.interpolate(comebine_label, size=output_size, mode="nearest") + combine_label = torch.nn.functional.interpolate(combine_label, size=output_size, mode="nearest") - controlnet_cond_vis = binarize_labels(comebine_label.as_tensor().long()).half() + controlnet_cond_vis = binarize_labels(combine_label.as_tensor().long()).half() # Generate random noise latents = initialize_noise_latents(latent_shape, device) * noise_factor @@ -301,18 +302,18 @@ def ldm_conditional_sample_one_image( # project output to [-1000, 1000] synthetic_images = synthetic_images * (a_max - a_min) + a_min # regularize background intensities - synthetic_images = crop_img_body_mask(synthetic_images, comebine_label) + synthetic_images = crop_img_body_mask(synthetic_images, combine_label) torch.cuda.empty_cache() - return synthetic_images, comebine_label + return synthetic_images, combine_label -def filter_mask_with_organs(comebine_label, anatomy_list): +def filter_mask_with_organs(combine_label, anatomy_list): """ Filter a mask to only include specified organs. Args: - comebine_label (torch.Tensor): The input mask. + combine_label (torch.Tensor): The input mask. anatomy_list (list): List of organ labels to keep. 
Returns: @@ -320,31 +321,31 @@ """ # final output mask file has shape of output_size, contains labels in anatomy_list # it is already interpolated to target size - comebine_label = comebine_label.long() + combine_label = combine_label.long() # filter out the organs that are not in anatomy_list for i in range(len(anatomy_list)): organ = anatomy_list[i] # replace it with a negative value so it will get mixed - comebine_label[comebine_label == organ] = -(i + 1) + combine_label[combine_label == organ] = -(i + 1) # zero-out voxels with value not in anatomy_list - comebine_label[comebine_label > 0] = 0 + combine_label[combine_label > 0] = 0 # output positive values - comebine_label = -comebine_label - return comebine_label + combine_label = -combine_label + return combine_label -def crop_img_body_mask(synthetic_images, comebine_label): +def crop_img_body_mask(synthetic_images, combine_label): """ Crop the synthetic image using a body mask. Args: synthetic_images (torch.Tensor): The synthetic images. - comebine_label (torch.Tensor): The body mask. + combine_label (torch.Tensor): The body mask. Returns: torch.Tensor: The cropped synthetic images. """ - synthetic_images[comebine_label == 0] = -1000 + synthetic_images[combine_label == 0] = -1000 return synthetic_images @@ -385,6 +386,12 @@ def check_input( f"spacing[0] have to be between 0.5 and 3.0 mm, spacing[2] have to be between 0.5 and 5.0 mm, yet got {spacing}." ) + if output_size[0] * spacing[0] < 256: + FOV = [output_size[axis] * spacing[axis] for axis in range(3)] + raise ValueError( + f"'spacing'({spacing}mm) and 'output_size'({output_size}) together decide the output field of view (FOV). The FOV will be {FOV}mm. We recommend the FOV in the x- and y-axes to be at least 256mm for head, and at least 384mm for other body regions like abdomen. There is no such restriction for the z-axis."
+ ) + # check controllable_anatomy_size format if len(controllable_anatomy_size) > 10: raise ValueError( @@ -497,7 +504,7 @@ def __init__( controllable_anatomy_size, image_output_ext=".nii.gz", label_output_ext=".nii.gz", - quality_check_args=None, + real_img_median_statistics="./configs/image_median_statistics.json", spacing=[1, 1, 1], num_inference_steps=None, mask_generation_num_inference_steps=None, @@ -563,9 +570,26 @@ self.autoencoder_sliding_window_infer_size = autoencoder_sliding_window_infer_size self.autoencoder_sliding_window_infer_overlap = autoencoder_sliding_window_infer_overlap - # quality check disabled for this version - self.quality_check_args = quality_check_args + # quality check args + self.max_try_time = 5 # if the quality check fails, retry up to self.max_try_time times + with open(real_img_median_statistics, "r") as json_file: + self.median_statistics = json.load(json_file) + self.label_int_dict = { + "liver": [1], + "spleen": [3], + "pancreas": [4], + "kidney": [5, 14], + "lung": [28, 29, 30, 31, 32], + "brain": [22], + "hepatic tumor": [26], + "bone lesion": [128], + "lung tumor": [23], + "colon cancer primaries": [27], + "pancreatic tumor": [24], + "bone": list(range(33, 57)) + list(range(63, 98)) + [120, 122, 127], + } + # networks self.autoencoder.eval() self.diffusion_unet.eval() self.controlnet.eval() @@ -635,7 +659,7 @@ def sample_multiple_images(self, num_img): if len(self.controllable_anatomy_size) > 0: # generate a synthetic mask ( - comebine_label_or, + combine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor, @@ -645,16 +669,16 @@ mask_file = item["mask_file"] if_aug = item["if_aug"] ( - comebine_label_or, + combine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor, ) = self.read_mask_information(mask_file) if need_resample: - comebine_label_or = self.ensure_output_size_and_spacing(comebine_label_or) + combine_label_or = self.ensure_output_size_and_spacing(combine_label_or) # mask augmentation if if_aug: - comebine_label_or = augmentation(comebine_label_or, self.output_size) + combine_label_or = augmentation(combine_label_or, self.output_size) end_time = time.time() logging.info(f"---- Mask preparation time: {end_time - start_time} seconds ----") torch.cuda.empty_cache() @@ -663,14 +687,16 @@ try_time = 0 while to_generate: synthetic_images, synthetic_labels = self.sample_one_pair( - comebine_label_or, + combine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor, ) - # current quality always return True - pass_quality_check = self.quality_check(synthetic_images) - if pass_quality_check or try_time > 3: + # synthetic image quality check + pass_quality_check = self.quality_check( + synthetic_images.cpu().detach().numpy(), combine_label_or.cpu().detach().numpy() + ) + if pass_quality_check or try_time > self.max_try_time: # save image/label pairs output_postfix = datetime.now().strftime("%Y%m%d_%H%M%S_%f") synthetic_labels.meta["filename_or_obj"] = "sample.nii.gz" @@ -727,7 +753,7 @@ def select_mask(self, candidate_mask_files, num_img): def sample_one_pair( self, - comebine_label_or_aug, + combine_label_or_aug, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor, @@ -736,7 +762,7 @@ Generate a single pair of synthetic image and mask. Args: - comebine_label_or_aug (torch.Tensor): Combined label tensor or augmented label.
+ combine_label_or_aug (torch.Tensor): Combined label tensor or augmented label. top_region_index_tensor (torch.Tensor): Tensor specifying the top region index. bottom_region_index_tensor (torch.Tensor): Tensor specifying the bottom region index. spacing_tensor (torch.Tensor): Tensor specifying the spacing. @@ -752,7 +778,7 @@ def sample_one_pair( noise_scheduler=self.noise_scheduler, scale_factor=self.scale_factor, device=self.device, - comebine_label_or=comebine_label_or_aug, + combine_label_or=combine_label_or_aug, top_region_index_tensor=top_region_index_tensor, bottom_region_index_tensor=bottom_region_index_tensor, spacing_tensor=spacing_tensor, @@ -828,23 +854,23 @@ def prepare_one_mask_and_meta_info(self, anatomy_size_condtion): Returns: tuple: A tuple containing the prepared mask and associated tensors. """ - comebine_label_or = self.sample_one_mask(anatomy_size=anatomy_size_condtion) + combine_label_or = self.sample_one_mask(anatomy_size=anatomy_size_condtion) # TODO: current mask generation model only can generate 256^3 volumes with 1.5 mm spacing. affine = torch.zeros((4, 4)) affine[0, 0] = 1.5 affine[1, 1] = 1.5 affine[2, 2] = 1.5 affine[3, 3] = 1.0 # dummy - comebine_label_or = MetaTensor(comebine_label_or, affine=affine) - comebine_label_or = self.ensure_output_size_and_spacing(comebine_label_or) + combine_label_or = MetaTensor(combine_label_or, affine=affine) + combine_label_or = self.ensure_output_size_and_spacing(combine_label_or) - top_region_index, bottom_region_index = get_body_region_index_from_mask(comebine_label_or) + top_region_index, bottom_region_index = get_body_region_index_from_mask(combine_label_or) spacing_tensor = torch.FloatTensor(self.spacing).unsqueeze(0).half().to(self.device) * 1e2 top_region_index_tensor = torch.FloatTensor(top_region_index).unsqueeze(0).half().to(self.device) * 1e2 bottom_region_index_tensor = torch.FloatTensor(bottom_region_index).unsqueeze(0).half().to(self.device) * 1e2 - return comebine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor + return combine_label_or, top_region_index_tensor, bottom_region_index_tensor, spacing_tensor def sample_one_mask(self, anatomy_size): """ @@ -1006,15 +1032,20 @@ def find_closest_masks(self, num_img): raise ValueError("Cannot find body region with given organ list.") return final_candidates - def quality_check(self, image): + def quality_check(self, image_data, label_data): """ - Perform a quality check on the generated image. This version disabled quality check and always return True. - + Perform a quality check on the generated image. Args: - image (torch.Tensor): The generated image. - + image_data (np.ndarray): The generated image. + label_data (np.ndarray): The corresponding whole body mask. Returns: bool: True if the image passes the quality check, False otherwise. """ - # This version disabled quality check + outlier_results = is_outlier(self.median_statistics, image_data, label_data, self.label_int_dict) + for label, result in outlier_results.items(): + if result.get("is_outlier", False): + logging.info( + f"Generated image quality check for label '{label}' failed: median value {result['median_value']} is outside the acceptable range ({result['low_thresh']} - {result['high_thresh']})." + ) + return False return True
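For reviewers who want to exercise the new quality check outside of `sample.py`, below is a standalone usage sketch. It assumes the working directory is `generation/maisi/`; `sample_image.nii.gz` and `sample_label.nii.gz` are placeholder file names for a generated image/mask pair.

```python
# Standalone sketch of the new quality check; file paths are placeholders.
import json

import nibabel as nib

from scripts.quality_check import is_outlier

# Per-organ median HU statistics shipped with this PR.
with open("./configs/image_median_statistics.json", "r") as f:
    statistics = json.load(f)

# Subset of the label mapping used in sample.py; see ./configs/label_dict.json.
label_int_dict = {"liver": [1], "spleen": [3], "kidney": [5, 14]}

# Placeholder paths: a generated CT image and its whole-body mask (same shape).
image_data = nib.load("sample_image.nii.gz").get_fdata()
label_data = nib.load("sample_label.nii.gz").get_fdata()

results = is_outlier(statistics, image_data, label_data, label_int_dict)
for organ, res in results.items():
    status = "OUTLIER" if res["is_outlier"] else "ok"
    print(f"{organ}: {status}, median={res['median_value']}, "
          f"range=[{res['low_thresh']}, {res['high_thresh']}]")
```

Organs that appear in the statistics file but not in `label_int_dict` (or not in the mask) are reported with `median_value=None` and `is_outlier=False`, so a partial mapping like the one above is safe.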