1
1
import json
2
- from typing import Dict , Text
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Any , Dict , List , Optional , Sequence , Text , Tuple , Union
3
5
4
6
import torch
5
7
6
- import monai .deploy .core as md
7
8
from monai .data import DataLoader , Dataset
8
- from monai .deploy .core import ExecutionContext , Image , InputContext , IOType , Operator , OutputContext
9
+ from monai .deploy .core import ConditionType , Fragment , Image , Operator , OperatorSpec
9
10
from monai .deploy .operators .monai_seg_inference_operator import InMemImageReader
10
11
from monai .transforms import (
11
12
Activations ,
20
21
)
21
22
22
23
23
# Decorators from the pre-Holoscan MONAI Deploy App SDK API, retained as a
# record of the operator's ports; with the current API, ports are declared in
# setup() instead.
# @md.input("image", Image, IOType.IN_MEMORY)
# @md.output("result_text", Text, IOType.IN_MEMORY)
# @env(pip_packages=["monai~=1.1.0"])
class ClassifierOperator(Operator):
    """Performs breast density classification using a DL model with an image converted from a DICOM MG series.

    Named inputs:
        image: Image object for which to generate the classification.
        output_folder: Optional, the path to save the results JSON file, overriding the one set on __init__.

    Named output:
        result_text: The classification results in text.
    """

    # Default folder (under the current working directory) for the results JSON file.
    DEFAULT_OUTPUT_FOLDER = Path.cwd() / "classification_results"
    # For testing the app directly, the model should be at the following path.
    # The HOLOSCAN_MODEL_PATH environment variable, when set, overrides the default.
    MODEL_LOCAL_PATH = Path(os.environ.get("HOLOSCAN_MODEL_PATH", Path.cwd() / "model/model.ts"))
42
    def __init__(
        self,
        frament: Fragment,
        *args,
        model_name: Optional[str] = "",
        model_path: Path = MODEL_LOCAL_PATH,
        output_folder: Path = DEFAULT_OUTPUT_FOLDER,
        **kwargs,
    ):
        """Creates an instance with the reference back to the containing application/fragment.

        frament (Fragment): An instance of the Application class which is derived from Fragment.
            NOTE(review): the parameter name looks like a typo for "fragment"; kept
            as-is so keyword callers are not broken — confirm before renaming.
        model_name (str, optional): Name of the model. Default to "" for single model app.
        model_path (Path): Path to the model file. Defaults to model/model.ts of current working dir.
        output_folder (Path, optional): output folder for saving the classification results JSON file.
        """

        # The names used for the model inference input and output
        self._input_dataset_key = "image"
        self._pred_dataset_key = "pred"

        # The names used for the operator input and output
        self.input_name_image = "image"
        self.output_name_result = "result_text"

        # The name of the optional input port for passing data to override the output folder path.
        self.input_name_output_folder = "output_folder"

        # The output folder set on the object can be overridden at each compute by data
        # in the optional named input
        self.output_folder = output_folder

        # Need the name when there are multiple models loaded
        self._model_name = model_name.strip() if isinstance(model_name, str) else ""
        # Need the path to load the models when they are not loaded in the execution context
        self.model_path = model_path

        # This needs to be at the end of the constructor, after all attributes are set,
        # because the base class may call setup() during construction.
        super().__init__(frament, *args, **kwargs)
80
+
81
+ def setup (self , spec : OperatorSpec ):
82
+ """Set up the operator named input and named output, both are in-memory objects."""
83
+
84
+ spec .input (self .input_name_image )
85
+ spec .input (self .input_name_output_folder ).condition (ConditionType .NONE ) # Optional for overriding.
86
+ spec .output (self .output_name_result ).condition (ConditionType .NONE ) # Not forcing a downstream receiver.
87
+
31
88
def _convert_dicom_metadata_datatype (self , metadata : Dict ):
32
89
if not metadata :
33
90
return metadata
@@ -55,16 +112,26 @@ def _convert_dicom_metadata_datatype(self, metadata: Dict):
55
112
56
113
return metadata
57
114
58
- def compute (self , op_input : InputContext , op_output : OutputContext , context : ExecutionContext ):
59
- input_image = op_input .get ("image" )
115
+ def compute (self , op_input , op_output , context ):
116
+ input_image = op_input .receive (self .input_name_image )
117
+ if not input_image :
118
+ raise ValueError ("Input image is not found." )
119
+ if not isinstance (input_image , Image ):
120
+ raise ValueError (f"Input is not the required type: { type (Image )!r} " )
121
+
60
122
_reader = InMemImageReader (input_image )
61
123
input_img_metadata = self ._convert_dicom_metadata_datatype (input_image .metadata ())
62
124
img_name = str (input_img_metadata .get ("SeriesInstanceUID" , "Img_in_context" ))
63
125
64
- output_path = context .output .get ().path
126
+ output_folder_on_compute = op_input .receive (self .input_name_output_folder )
127
+ output_folder = output_folder_on_compute if output_folder_on_compute else self .output_folder
128
+ Path .mkdir (output_folder , parents = True , exist_ok = True ) # Let exception bubble up if raised.
65
129
66
130
device = torch .device ("cuda" if torch .cuda .is_available () else "cpu" )
67
- model = context .models .get ()
131
+
132
+ # Need to get the model from context, when it is re-implemented, and for now, load it directly here.
133
+ # model = context.models.get()
134
+ model = torch .jit .load (self .model_path , map_location = device )
68
135
69
136
pre_transforms = self .pre_process (_reader )
70
137
post_transforms = self .post_process ()
@@ -82,15 +149,12 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe
82
149
result_dict = (
83
150
"A " + ":" + str (out [0 ]) + " B " + ":" + str (out [1 ]) + " C " + ":" + str (out [2 ]) + " D " + ":" + str (out [3 ])
84
151
)
85
- result_dict_out = {"A" : str (out [0 ]), "B" : str (out [1 ]), "C" : str (out [2 ]), "D" : str (out [3 ])}
86
- output_folder = context .output .get ().path
87
- output_folder .mkdir (parents = True , exist_ok = True )
88
152
89
153
output_path = output_folder / "output.json"
90
154
with open (output_path , "w" ) as fp :
91
155
json .dump (result_dict , fp )
92
156
93
- op_output .set (result_dict , "result_text" )
157
+ op_output .emit (result_dict , "result_text" )
94
158
95
159
def pre_process (self , image_reader ) -> Compose :
96
160
return Compose (
0 commit comments