# See the License for the specific language governing permissions and
# limitations under the License.

- from typing import Text
-
- import monai.deploy.core as md
- from monai.deploy.core import (
-     Application,
-     DataPath,
-     ExecutionContext,
-     Image,
-     InputContext,
-     IOType,
-     Operator,
-     OutputContext,
- )
- from monai.deploy.operators import DICOMTextSRWriterOperator, EquipmentInfo, ModelInfo
+ import logging
+ import os
+ from pathlib import Path
+ from typing import Optional, Text
+
+ from monai.deploy.conditions import CountCondition
+ from monai.deploy.core import AppContext, Application, ConditionType, Fragment, Image, Operator, OperatorSpec
+ from monai.deploy.operators.dicom_text_sr_writer_operator import DICOMTextSRWriterOperator, EquipmentInfo, ModelInfo
from monai.transforms import AddChannel, Compose, EnsureType, ScaleIntensity

MEDNIST_CLASSES = ["AbdomenCT", "BreastMRI", "CXR", "ChestCT", "Hand", "HeadCT"]


- @md.input("image", DataPath, IOType.DISK)
- @md.output("image", Image, IOType.IN_MEMORY)
- @md.env(pip_packages=["pillow"])
+ # @md.env(pip_packages=["pillow"])
class LoadPILOperator(Operator):
    """Load image from the given input (DataPath) and set numpy array to the output (Image)."""

-     def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
+     DEFAULT_INPUT_FOLDER = Path.cwd() / "input"
+     DEFAULT_OUTPUT_NAME = "image"
+
+     # For now, the input folder needs to be an instance attribute, set on init.
+     # To change the input folder dynamically per compute, use an (optional) input port to convey the
+     # value of the input folder, emitted by an upstream operator.
+     def __init__(
+         self,
+         fragment: Fragment,
+         *args,
+         input_folder: Path = DEFAULT_INPUT_FOLDER,
+         output_name: str = DEFAULT_OUTPUT_NAME,
+         **kwargs,
+     ):
+         """Creates a loader object with the input folder and the output port name overrides as needed.
+
+         Args:
+             fragment (Fragment): An instance of the Application class which is derived from Fragment.
+             input_folder (Path): Folder from which to load input file(s).
+                 Defaults to `input` in the current working directory.
+             output_name (str): Name of the output port, which holds an image object. Defaults to `image`.
+         """
+
+         self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
+         self.input_path = input_folder
+         self.index = 0
+         self.output_name_image = (
+             output_name.strip() if output_name and len(output_name.strip()) > 0 else LoadPILOperator.DEFAULT_OUTPUT_NAME
+         )
+
+         super().__init__(fragment, *args, **kwargs)
+
+     def setup(self, spec: OperatorSpec):
+         """Set up the named input and output port(s)."""
+         spec.output(self.output_name_image)
+
+     def compute(self, op_input, op_output, context):
        import numpy as np
        from PIL import Image as PILImage

-         input_path = op_input.get().path
+         # Input path is stored in the object attribute, but could change to use a named port if need be.
+         input_path = self.input_path
        if input_path.is_dir():
-             input_path = next(input_path.glob("*.*"))  # take the first file
+             input_path = next(self.input_path.glob("*.*"))  # take the first file

        image = PILImage.open(input_path)
        image = image.convert("L")  # convert to greyscale image
        image_arr = np.asarray(image)

        output_image = Image(image_arr)  # create Image domain object with a numpy array
-         op_output.set(output_image)
+         op_output.emit(output_image, self.output_name_image)  # cannot omit the name even with a single output.


- @md.input("image", Image, IOType.IN_MEMORY)
- @md.output("result_text", Text, IOType.IN_MEMORY)
- @md.env(pip_packages=["monai"])
+ # @md.env(pip_packages=["monai"])
class MedNISTClassifierOperator(Operator):
-     """Classifies the given image and returns the class name."""
+     """Classifies the given image and returns the class name.
+
+     Named inputs:
+         image: Image object for which to generate the classification.
+         output_folder: Optional, the path to save the results JSON file, overriding the one set on __init__.
+
+     Named output:
+         result_text: The classification results in text.
+     """
+
+     DEFAULT_OUTPUT_FOLDER = Path.cwd() / "classification_results"
+     # For testing the app directly, the model should be at the following path.
+     MODEL_LOCAL_PATH = Path(os.environ.get("HOLOSCAN_MODEL_PATH", Path.cwd() / "model/model.ts"))
+
+     def __init__(
+         self,
+         fragment: Fragment,
+         *args,
+         model_name: Optional[str] = "",
+         model_path: Path = MODEL_LOCAL_PATH,
+         output_folder: Path = DEFAULT_OUTPUT_FOLDER,
+         **kwargs,
+     ):
+         """Creates an instance with the reference back to the containing application/fragment.
+
+         fragment (Fragment): An instance of the Application class which is derived from Fragment.
+         model_name (str, optional): Name of the model. Defaults to "" for a single-model app.
+         model_path (Path): Path to the model file. Defaults to model/model.ts in the current working dir.
+         output_folder (Path, optional): Output folder for saving the classification results JSON file.
+         """
+
+         # The names used for the model inference input and output
+         self._input_dataset_key = "image"
+         self._pred_dataset_key = "pred"
+
+         # The names used for the operator input and output
+         self.input_name_image = "image"
+         self.output_name_result = "result_text"
+
+         # The name of the optional input port for passing data to override the output folder path.
+         self.input_name_output_folder = "output_folder"
+
+         # The output folder set on the object can be overridden at each compute by data in the optional named input
+         self.output_folder = output_folder
+
+         # Need the name when there are multiple models loaded
+         self._model_name = model_name.strip() if isinstance(model_name, str) else ""
+         # Need the path to load the model when it is not loaded in the execution context
+         self.model_path = model_path
+
+         # This needs to be at the end of the constructor.
+         super().__init__(fragment, *args, **kwargs)
+
+     def setup(self, spec: OperatorSpec):
+         """Set up the operator's named input(s) and named output(s); both are in-memory objects."""
+
+         spec.input(self.input_name_image)
+         spec.input(self.input_name_output_folder).condition(ConditionType.NONE)  # Optional for overriding.
+         spec.output(self.output_name_result).condition(ConditionType.NONE)  # Not forcing a downstream receiver.

    @property
    def transform(self):
        return Compose([AddChannel(), ScaleIntensity(), EnsureType()])

-     def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
+     def compute(self, op_input, op_output, context):
        import json

        import torch

-         img = op_input.get().asnumpy()  # (64, 64), uint8
+         img = op_input.receive(self.input_name_image).asnumpy()  # (64, 64), uint8. Input validation can be added.
        image_tensor = self.transform(img)  # (1, 64, 64), torch.float64
        image_tensor = image_tensor[None].float()  # (1, 1, 64, 64), torch.float32

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        image_tensor = image_tensor.to(device)

-         model = context.models.get()  # get a TorchScriptModel object
+         # The model should be retrieved from the execution context once that is re-implemented; for now, load it directly here.
+         # model = context.models.get()  # get a TorchScriptModel object
+         model = torch.jit.load(self.model_path, map_location=device)

        with torch.no_grad():
            outputs = model(image_tensor)
@@ -81,37 +168,45 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe

        result = MEDNIST_CLASSES[output_classes[0]]  # get the class name
        print(result)
-         op_output.set(result, "result_text")
-
-         # Get output (folder) path and create the folder if not exists
-         # The following gets the App context's output path, instead the operator's.
-         output_folder = context.output.get().path
-         output_folder.mkdir(parents=True, exist_ok=True)
+         op_output.emit(result, self.output_name_result)

-         # Write result to "output.json"
-         output_path = output_folder / "output.json"
+         # Get the output folder, with the value in the optional input port overriding the object attribute
+         output_folder_on_compute = op_input.receive(self.input_name_output_folder) or self.output_folder
+         Path.mkdir(output_folder_on_compute, parents=True, exist_ok=True)  # Let exception bubble up if raised.
+         output_path = output_folder_on_compute / "output.json"
        with open(output_path, "w") as fp:
            json.dump(result, fp)


- @md.resource(cpu=1, gpu=1, memory="1Gi")
+ # @md.resource(cpu=1, gpu=1, memory="1Gi")
class App(Application):
    """Application class for the MedNIST classifier."""

    def compose(self):
-         load_pil_op = LoadPILOperator()
-         classifier_op = MedNISTClassifierOperator()
+         app_context = AppContext({})  # Let it figure out all the attributes without overriding
+         app_input_path = Path(app_context.input_path)
+         app_output_path = Path(app_context.output_path)
+         model_path = Path(app_context.model_path)
+         load_pil_op = LoadPILOperator(self, CountCondition(self, 1), input_folder=app_input_path, name="pil_loader_op")
+         classifier_op = MedNISTClassifierOperator(
+             self, output_folder=app_output_path, model_path=model_path, name="classifier_op"
+         )

        my_model_info = ModelInfo("MONAI WG Trainer", "MEDNIST Classifier", "0.1", "xyz")
        my_equipment = EquipmentInfo(manufacturer="MONAI Deploy App SDK", manufacturer_model="DICOM SR Writer")
        my_special_tags = {"SeriesDescription": "Not for clinical use. The result is for research use only."}
        dicom_sr_operator = DICOMTextSRWriterOperator(
-             copy_tags=False, model_info=my_model_info, equipment_info=my_equipment, custom_tags=my_special_tags
+             self,
+             copy_tags=False,
+             model_info=my_model_info,
+             equipment_info=my_equipment,
+             custom_tags=my_special_tags,
+             output_folder=app_output_path,
        )

-         self.add_flow(load_pil_op, classifier_op)
-         self.add_flow(classifier_op, dicom_sr_operator, {"result_text": "classification_result"})
+         self.add_flow(load_pil_op, classifier_op, {("image", "image")})
+         self.add_flow(classifier_op, dicom_sr_operator, {("result_text", "text")})


if __name__ == "__main__":
-     App(do_run=True)
+     App().run()
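
Note on the optional "output_folder" input port added to MedNISTClassifierOperator above (declared in setup with ConditionType.NONE): it is intended to let an upstream operator override the output folder at each compute. Below is a minimal sketch, not part of this change, of what such an upstream operator could look like; the class name OutputFolderSetter, the port name "folder", and the add_flow wiring are illustrative assumptions only, reusing the Operator/OperatorSpec/CountCondition API shown in the diff.

from pathlib import Path

from monai.deploy.core import Fragment, Operator, OperatorSpec


class OutputFolderSetter(Operator):
    """Hypothetical upstream operator emitting a Path to override the classifier's output folder."""

    def __init__(self, fragment: Fragment, *args, folder: Path, **kwargs):
        self.folder = folder
        super().__init__(fragment, *args, **kwargs)

    def setup(self, spec: OperatorSpec):
        spec.output("folder")  # single output port carrying the Path value

    def compute(self, op_input, op_output, context):
        op_output.emit(self.folder, "folder")


# In App.compose(), it could then be wired to the classifier's optional input port, e.g.:
#     folder_setter_op = OutputFolderSetter(self, CountCondition(self, 1), folder=Path("my_results"), name="folder_setter_op")
#     self.add_flow(folder_setter_op, classifier_op, {("folder", "output_folder")})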