-# # Copyright 2022 MONAI Consortium
-# # Licensed under the Apache License, Version 2.0 (the "License");
-# # you may not use this file except in compliance with the License.
-# # You may obtain a copy of the License at
-# #     http://www.apache.org/licenses/LICENSE-2.0
-# # Unless required by applicable law or agreed to in writing, software
-# # distributed under the License is distributed on an "AS IS" BASIS,
-# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# # See the License for the specific language governing permissions and
-# # limitations under the License.
-
-# import numpy as np
-
-# import monai.deploy.core as md
-# from monai.deploy.core import ExecutionContext, Image, InputContext, IOType, Operator, OutputContext
-# from monai.deploy.utils.importutil import optional_import
-
-# DataDefinition, _ = optional_import("clara.viz.core", name="DataDefinition")
-# Widget, _ = optional_import("clara.viz.widgets", name="Widget")
-# display, _ = optional_import("IPython.display", name="display")
-# interactive, _ = optional_import("ipywidgets", name="interactive")
-# Dropdown, _ = optional_import("ipywidgets", name="Dropdown")
-# Box, _ = optional_import("ipywidgets", name="Box")
-# VBox, _ = optional_import("ipywidgets", name="VBox")
-
-
-# @md.input("image", Image, IOType.IN_MEMORY)
-# @md.input("seg_image", Image, IOType.IN_MEMORY)
+# Copyright 2022-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+from monai.deploy.core import Fragment, Image, Operator, OperatorSpec
+from monai.deploy.utils.importutil import optional_import
+
+DataDefinition, _ = optional_import("clara.viz.core", name="DataDefinition")
+Widget, _ = optional_import("clara.viz.widgets", name="Widget")
+display, _ = optional_import("IPython.display", name="display")
+interactive, _ = optional_import("ipywidgets", name="interactive")
+Dropdown, _ = optional_import("ipywidgets", name="Dropdown")
+Box, _ = optional_import("ipywidgets", name="Box")
+VBox, _ = optional_import("ipywidgets", name="VBox")
+
+
 # @md.env(pip_packages=["clara.viz.core", "clara.viz.widgets", "IPython"])
-# class ClaraVizOperator(Operator):
-#     """
-#     This operator uses Clara Viz to provide interactive view of a 3D volume including segmentation mask.
-#     """
-
-#     def __init__(self):
-#         """Constructor of the operator."""
-#         super().__init__()
-
-#     @staticmethod
-#     def _build_array(image, order):
-#         numpy_array = image.asnumpy()
-
-#         array = DataDefinition.Array(array=numpy_array, order=order)
-#         array.element_size = [1.0]
-#         array.element_size.append(image.metadata().get("col_pixel_spacing", 1.0))
-#         array.element_size.append(image.metadata().get("row_pixel_spacing", 1.0))
-#         array.element_size.append(image.metadata().get("depth_pixel_spacing", 1.0))
-
-#         # the renderer is expecting data in RIP order (Right Inferior Posterior) which results in
-#         # this matrix
-#         target_affine_transform = [
-#             [-1.0, 0.0, 0.0, 0.0],
-#             [0.0, 0.0, 1.0, 0.0],
-#             [0.0, -1.0, 0.0, 0.0],
-#             [0.0, 0.0, 0.0, 1.0],
-#         ]
-
-#         dicom_affine_transform = image.metadata().get("dicom_affine_transform", np.identity(4))
-
-#         affine_transform = np.matmul(target_affine_transform, dicom_affine_transform)
-
-#         array.permute_axes = [
-#             0,
-#             max(range(3), key=lambda k: abs(affine_transform[0][k])) + 1,
-#             max(range(3), key=lambda k: abs(affine_transform[1][k])) + 1,
-#             max(range(3), key=lambda k: abs(affine_transform[2][k])) + 1,
-#         ]
-
-#         array.flip_axes = [
-#             False,
-#             affine_transform[0][array.permute_axes[1] - 1] < 0.0,
-#             affine_transform[1][array.permute_axes[2] - 1] < 0.0,
-#             affine_transform[2][array.permute_axes[3] - 1] < 0.0,
-#         ]
-
-#         return array
-
-#     def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
-#         """Displays the input image and segmentation mask
-
-#         Args:
-#             op_input (InputContext): An input context for the operator.
-#             op_output (OutputContext): An output context for the operator.
-#             context (ExecutionContext): An execution context for the operator.
-#         """
-#         input_image = op_input.get("image")
-#         if not input_image:
-#             raise ValueError("Input image is not found.")
-#         input_seg_image = op_input.get("seg_image")
-#         if not input_seg_image:
-#             raise ValueError("Input segmentation image is not found.")
-
-#         # build the data definition
-#         data_definition = DataDefinition()
-
-#         data_definition.arrays.append(self._build_array(input_image, "DXYZ"))
-
-#         data_definition.arrays.append(self._build_array(input_seg_image, "MXYZ"))
-
-#         widget = Widget()
-#         widget.select_data_definition(data_definition)
-#         # default view mode is 'CINEMATIC' switch to 'SLICE_SEGMENTATION' since we have no transfer functions defined
-#         widget.settings["Views"][0]["mode"] = "SLICE_SEGMENTATION"
-#         widget.settings["Views"][0]["cameraName"] = "Top"
-#         widget.set_settings()
-
-#         # add controls
-#         def set_view_mode(view_mode):
-#             widget.settings["Views"][0]["mode"] = view_mode
-#             if view_mode == "CINEMATIC":
-#                 widget.settings["Views"][0]["cameraName"] = "Perspective"
-#             elif widget.settings["Views"][0]["cameraName"] == "Perspective":
-#                 widget.settings["Views"][0]["cameraName"] = "Top"
-#             widget.set_settings()
-
-#         widget_view_mode = interactive(
-#             set_view_mode,
-#             view_mode=Dropdown(
-#                 options=[("Cinematic", "CINEMATIC"), ("Slice", "SLICE"), ("Slice Segmentation", "SLICE_SEGMENTATION")],
-#                 value="SLICE_SEGMENTATION",
-#                 description="View mode",
-#             ),
-#         )
-
-#         def set_camera(camera):
-#             if widget.settings["Views"][0]["mode"] != "CINEMATIC":
-#                 widget.settings["Views"][0]["cameraName"] = camera
-#             widget.set_settings()
-
-#         widget_camera = interactive(
-#             set_camera, camera=Dropdown(options=["Top", "Right", "Front"], value="Top", description="Camera")
-#         )
-
-#         display(Box([widget, VBox([widget_view_mode, widget_camera])]))
+class ClaraVizOperator(Operator):
+    """
+    This operator uses Clara Viz to provide an interactive view of a 3D volume including the segmentation mask.
+
+    Named input(s):
+        image: Image object of the input image, including key metadata, e.g. pixel spacings and orientations.
+        seg_image: Image object of the segmentation image derived from the input image.
+    """
+
+    def __init__(self, fragment: Fragment, *args, **kwargs):
+        """Constructor of the operator.
+
+        Args:
+            fragment (Fragment): An instance of the Application class which is derived from Fragment.
+        """
+
+        self.input_name_image = "image"
+        self.input_name_seg_image = "seg_image"
+
+        super().__init__(fragment, *args, **kwargs)
+
+    def setup(self, spec: OperatorSpec):
+        spec.input(self.input_name_image)
+        spec.input(self.input_name_seg_image)
+        # There is no output for downstream receiver(s); this operator presents an interactive UI instead.
+
+    @staticmethod
+    def _build_array(image, order):
+        numpy_array = image.asnumpy()
+
+        array = DataDefinition.Array(array=numpy_array, order=order)
+        array.element_size = [1.0]
+        array.element_size.append(image.metadata().get("col_pixel_spacing", 1.0))
+        array.element_size.append(image.metadata().get("row_pixel_spacing", 1.0))
+        array.element_size.append(image.metadata().get("depth_pixel_spacing", 1.0))
+
+        # the renderer expects data in RIP order (Right Inferior Posterior), which results in
+        # this matrix
+        target_affine_transform = [
+            [-1.0, 0.0, 0.0, 0.0],
+            [0.0, 0.0, 1.0, 0.0],
+            [0.0, -1.0, 0.0, 0.0],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+
+        dicom_affine_transform = image.metadata().get("dicom_affine_transform", np.identity(4))
+
+        affine_transform = np.matmul(target_affine_transform, dicom_affine_transform)
+
+        array.permute_axes = [
+            0,
+            max(range(3), key=lambda k: abs(affine_transform[0][k])) + 1,
+            max(range(3), key=lambda k: abs(affine_transform[1][k])) + 1,
+            max(range(3), key=lambda k: abs(affine_transform[2][k])) + 1,
+        ]
+
+        array.flip_axes = [
+            False,
+            affine_transform[0][array.permute_axes[1] - 1] < 0.0,
+            affine_transform[1][array.permute_axes[2] - 1] < 0.0,
+            affine_transform[2][array.permute_axes[3] - 1] < 0.0,
+        ]
+
+        return array
+
+    def compute(self, op_input, op_output, context):
+        """Displays the input image and segmentation mask.
+
+        Args:
+            op_input (InputContext): An input context for the operator.
+            op_output (OutputContext): An output context for the operator.
+            context (ExecutionContext): An execution context for the operator.
+        """
+        input_image = op_input.receive(self.input_name_image)
+        if not input_image or not isinstance(input_image, Image):
+            raise ValueError(f"Input image not found or its type {type(input_image)} is not Image.")
+        input_seg_image = op_input.receive(self.input_name_seg_image)
+        if not input_seg_image or not isinstance(input_seg_image, Image):
+            raise ValueError(f"Input segmentation image not found or its type {type(input_seg_image)} is not Image.")
+
+        # build the data definition
+        data_definition = DataDefinition()
+
+        data_definition.arrays.append(self._build_array(input_image, "DXYZ"))
+
+        data_definition.arrays.append(self._build_array(input_seg_image, "MXYZ"))
+
+        widget = Widget()
+        widget.select_data_definition(data_definition)
+        # the default view mode is 'CINEMATIC'; switch to 'SLICE_SEGMENTATION' since we have no transfer functions defined
+        widget.settings["Views"][0]["mode"] = "SLICE_SEGMENTATION"
+        widget.settings["Views"][0]["cameraName"] = "Top"
+        widget.set_settings()
+
+        # add controls
+        def set_view_mode(view_mode):
+            widget.settings["Views"][0]["mode"] = view_mode
+            if view_mode == "CINEMATIC":
+                widget.settings["Views"][0]["cameraName"] = "Perspective"
+            elif widget.settings["Views"][0]["cameraName"] == "Perspective":
+                widget.settings["Views"][0]["cameraName"] = "Top"
+            widget.set_settings()
+
+        widget_view_mode = interactive(
+            set_view_mode,
+            view_mode=Dropdown(
+                options=[("Cinematic", "CINEMATIC"), ("Slice", "SLICE"), ("Slice Segmentation", "SLICE_SEGMENTATION")],
+                value="SLICE_SEGMENTATION",
+                description="View mode",
+            ),
+        )
+
+        def set_camera(camera):
+            if widget.settings["Views"][0]["mode"] != "CINEMATIC":
+                widget.settings["Views"][0]["cameraName"] = camera
+            widget.set_settings()
+
+        widget_camera = interactive(
+            set_camera, camera=Dropdown(options=["Top", "Right", "Front"], value="Top", description="Camera")
+        )
+
+        display(Box([widget, VBox([widget_view_mode, widget_camera])]))
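
To make the axis-remapping logic in _build_array concrete, here is a minimal sketch of how permute_axes and flip_axes are derived, assuming the image metadata carries no "dicom_affine_transform" so that the np.identity(4) fallback is used; the printed values hold only for that assumed input.

import numpy as np

# Same target matrix as in _build_array: the renderer expects RIP (Right Inferior Posterior) order.
target_affine_transform = np.array(
    [
        [-1.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, -1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
)
dicom_affine_transform = np.identity(4)  # assumed fallback: no "dicom_affine_transform" in metadata
affine_transform = np.matmul(target_affine_transform, dicom_affine_transform)

# For each renderer axis, pick the volume axis with the largest absolute contribution in the
# combined affine (+1 skips the leading component axis of the "DXYZ"/"MXYZ" orders), and
# flip that axis when the contribution is negative.
permute_axes = [0] + [max(range(3), key=lambda k: abs(affine_transform[i][k])) + 1 for i in range(3)]
flip_axes = [False] + [bool(affine_transform[i][permute_axes[i + 1] - 1] < 0.0) for i in range(3)]

print(permute_axes)  # [0, 1, 3, 2]
print(flip_axes)     # [False, True, False, True]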
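
As a usage note, the operator now takes the application fragment as its first constructor argument and declares two named inputs in setup(). The following is a minimal wiring sketch, not taken from this change: VolumeLoaderOp and SegInferenceOp are hypothetical stand-ins for whatever operators actually produce the volume Image and the segmentation Image, and their output port names ("image", "seg_image") are likewise assumed.

from monai.deploy.core import Application


class InteractiveVizApp(Application):
    def compose(self):
        # Hypothetical upstream operators; replace with the pipeline's real volume-loading
        # and segmentation operators.
        vol_op = VolumeLoaderOp(self, name="volume_loader")
        seg_op = SegInferenceOp(self, name="seg_inference")

        # ClaraVizOperator as defined above; its named inputs are "image" and "seg_image".
        viz_op = ClaraVizOperator(self, name="clara_viz")

        # Connect upstream output ports to the viz operator's named inputs.
        self.add_flow(vol_op, viz_op, {("image", "image")})
        self.add_flow(seg_op, viz_op, {("seg_image", "seg_image")})


if __name__ == "__main__":
    InteractiveVizApp().run()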