Upload 2 files

- app.py: +58 -19
- requirements.txt: +3 -1

app.py
CHANGED
@@ -11,19 +11,20 @@ from monai.transforms import (
     LoadImage, Orientation, Compose, ToTensor, Activations,
     FillHoles, KeepLargestConnectedComponent, AsDiscrete, ScaleIntensityRange
 )
+import llama_cpp
+import llama_cpp.llama_tokenizer
 
 
 # global params
 THIS_DIR = os.path.dirname(os.path.abspath(__file__))
 examples_path = [
     os.path.join(THIS_DIR, 'examples', 'HCC_003.nrrd'),
-    os.path.join(THIS_DIR, 'examples', 'HCC_006.nrrd'),
     os.path.join(THIS_DIR, 'examples', 'HCC_007.nrrd'),
     os.path.join(THIS_DIR, 'examples', 'HCC_018.nrrd')
 ]
 models_path = {
     "liver": os.path.join(THIS_DIR, 'checkpoints', 'liver_3DSegResNetVAE.pth'),
-    "tumor": os.path.join(THIS_DIR, 'checkpoints', '
+    "tumor": os.path.join(THIS_DIR, 'checkpoints', 'tumor_3DSegResNetVAE.pth')
 }
 cache_path = {
     "liver mask": "liver_mask.npy",
@@ -127,7 +128,7 @@ def load_image(image, slider, selected_slice):
 
     image, annotations = render(image_name, slider, selected_slice)
 
-    return f"Your image is successfully loaded! Please use the slider to view the image (zmin: 1, zmax: {input.shape[-1]}).", (image, annotations)
+    return f"👋 Your image is successfully loaded! Please use the slider to view the image (zmin: 1, zmax: {input.shape[-1]}).", (image, annotations)
 
 
 def segment_tumor(image_name):
@@ -245,44 +246,87 @@ def segment(image, selected_mask, slider, selected_slice):
 
     image, annotations = render(image, slider, selected_slice)
 
-    return f"Segmentation is completed
+    return f"🥳 Segmentation is completed. You can use the slider to view slices or proceed with generating a summary report.", download_liver, download_tumor, (image, annotations)
 
 
 def generate_summary(image):
+
     image_name = image.name.split('/')[-1].replace(".nrrd","")
+
+    if "liver mask" not in mydict[image_name] or "tumor mask" not in mydict[image_name]:
+        return "❌ You need to generate both liver and tumor masks before we can create a summary report.", "Not generated"
+
+    # extract tumor features from CT scan
     features = generate_features(mydict[image_name]["img"], mydict[image_name]["liver mask"], mydict[image_name]["tumor mask"])
     print(features)
 
-
+    # initialize LLM, pulling from Hugging Face
+    llama = llama_cpp.Llama.from_pretrained(
+        repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
+        filename="*q8_0.gguf",
+        tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B"),
+        verbose=False
+    )
+
+    # openai.api_key = os.environ["OPENAI"]
+    system_msg = """
+    You are a radiologist. You use a segmentation model that extracts tumor characteristics from CT scans, from which you generate a diagnosis report.
+    The report should include recommendations for next steps, and a disclaimer that these results should be taken with a grain of salt.
+    """
+
+    user_msg = f"""
+    The tumor characteristics are:
+    {str(features)}
+    Please provide your interpretation of the findings and a differential diagnosis, considering the possibility of liver cancer (hepatocellular carcinoma or metastatic liver lesions).
+    """
+    print(user_msg)
+
+    response = llama.create_chat_completion(
+        messages=[
+            {"role": "system", "content": system_msg},
+            {"role": "user", "content": user_msg}
+        ],
+        temperature=0.7
+    )
+    print(response)
+
+    try:
+        report = response["choices"][0]["message"]["content"]
+        return "🎉 Your AI diagnosis summary report is generated! Please review below. Thank you for trying this tool!", report
+    except Exception as e:
+        return "Sorry, there was an error in report generation: " + str(e), "To be generated"
 
 
 with gr.Blocks() as app:
     with gr.Column():
         gr.Markdown(
             """
-            # 
+            # MedAssist-Liver: an AI-powered Liver Tumor Segmentation Tool
+
+            Welcome! Explore the power of AI for automated medical image analysis with our user-friendly app.
 
-            This tool is designed to assist in the identification and segmentation of
+            This tool is designed to assist in the identification and segmentation of the liver and tumors in medical images. When you upload a CT scan, a pre-trained machine learning model automatically segments the liver and tumor regions. The segmented tumor's characteristics, such as shape, size, and location, are then analyzed to produce an AI-generated diagnosis report of the liver cancer.
 
-            ⚠️ Important disclaimer: these model outputs should NOT replace the medical diagnosis of healthcare professionals. For your reference, our model was trained on the [HCC-TACE-Seg dataset](https://www.cancerimagingarchive.net/collection/hcc-tace-seg/) and achieved 0.954 dice score for
+            ⚠️ Important disclaimer: these model outputs should NOT replace the medical diagnosis of healthcare professionals. For your reference, our model was trained on the [HCC-TACE-Seg dataset](https://www.cancerimagingarchive.net/collection/hcc-tace-seg/) and achieved a 0.954 dice score for liver segmentation and a 0.570 dice score for tumor segmentation. Improving tumor segmentation is still an active area of research!
             """)
 
     with gr.Row():
-        comment = gr.Textbox(label='Your tool guide:', value="👋 Hi there, 
+        comment = gr.Textbox(label='🤖 Your tool guide:', value="👋 Hi there, I will be helping you use this tool. To get started, upload a CT scan image or select one from the examples.")
 
 
     with gr.Row():
 
         with gr.Column(scale=2):
             image_file = gr.File(label="Step 1: Upload a CT image (.nrrd)", file_count='single', file_types=['.nrrd'], type='filepath')
+            gr.Examples(examples_path, [image_file])
             btn_upload = gr.Button("Upload")
 
         with gr.Column(scale=2):
             selected_mask = gr.CheckboxGroup(label='Step 2: Select mask to produce', choices=['liver mask', 'tumor mask'], value=['liver mask'])
-            btn_segment = gr.Button("
+            btn_segment = gr.Button("Generate Segmentation")
 
     with gr.Row():
-        slider = gr.Slider(1, 100, step=1, label="
+        slider = gr.Slider(1, 100, step=1, label="Image slice: ")
         selected_slice = gr.State(value=1)
 
     with gr.Row():
@@ -295,17 +339,11 @@ with gr.Blocks() as app:
             btn_download_tumor = gr.DownloadButton("Download tumor mask", visible=False)
 
     with gr.Row():
-        report = gr.Textbox(label='Step 4. Generate summary report using AI:')
+        report = gr.Textbox(label='Step 4. Generate summary report using AI:', value="To be generated. ")
 
     with gr.Row():
         btn_report = gr.Button("Generate summary")
-
 
-    gr.Examples(
-        examples_path,
-        [image_file],
-    )
-
     btn_upload.click(fn=load_image,
                      inputs=[image_file, slider, selected_slice],
                      outputs=[comment, myimage],
@@ -323,7 +361,8 @@ with gr.Blocks() as app:
     )
 
     btn_report.click(fn=generate_summary,
-
+                     inputs=[image_file],
+                     outputs=[comment, report]
     )
 
 
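For context, the core of the new `generate_summary` path is a single llama-cpp-python chat-completion call. Below is a minimal, self-contained sketch of that call outside the Gradio app, assuming the same Qwen1.5-0.5B-Chat GGUF checkpoint the diff uses; the `features` dict here is an illustrative stand-in for the output of `generate_features`, not the app's real values. Note that `LlamaHFTokenizer.from_pretrained` loads the tokenizer via the `transformers` package, which is why the commit also pins `transformers` in requirements.txt.

```python
import llama_cpp
import llama_cpp.llama_tokenizer

# Download the quantized chat model from the Hugging Face Hub and pair it
# with the original HF tokenizer (requires the `transformers` package).
llama = llama_cpp.Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q8_0.gguf",  # glob pattern matching the q8_0 file in the repo
    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B"),
    verbose=False,
)

# Illustrative stand-in for the output of generate_features().
features = {"tumor_volume_cm3": 12.4, "max_diameter_mm": 31.0, "location": "segment VI"}

response = llama.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a radiologist writing a diagnosis report from tumor characteristics."},
        {"role": "user", "content": f"The tumor characteristics are: {features}. Interpret the findings."},
    ],
    temperature=0.7,
)

# Chat completions mirror the OpenAI response shape.
print(response["choices"][0]["message"]["content"])
```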
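On the Gradio side, the diff's main change is completing the `btn_report.click` wiring: component values flow into the handler via `inputs`, and the handler's tuple return is written back to the components in `outputs`, in order. Here is a hedged, self-contained sketch of that pattern, including the reveal of a hidden `gr.DownloadButton` by returning an updated component, which is assumed from the `download_liver, download_tumor` values returned by `segment`; `make_report` and its logic are illustrative, not the app's actual handler.

```python
import gradio as gr

def make_report(file_path):
    # Handler returns one value per output component, in order.
    if file_path is None:
        return "❌ Upload a file first.", "Not generated", gr.DownloadButton(visible=False)
    summary = f"Report for {file_path}"
    # Returning a DownloadButton with a value and visible=True reveals it (Gradio 4.x).
    return "🎉 Report generated!", summary, gr.DownloadButton(value=file_path, visible=True)

with gr.Blocks() as demo:
    image_file = gr.File(label="Upload a CT image (.nrrd)", file_types=[".nrrd"], type="filepath")
    comment = gr.Textbox(label="Your tool guide:")
    report = gr.Textbox(label="Summary report:", value="To be generated. ")
    btn_download = gr.DownloadButton("Download mask", visible=False)
    btn_report = gr.Button("Generate summary")

    # Mirrors the diff's wiring: read from inputs, write back to outputs.
    btn_report.click(fn=make_report,
                     inputs=[image_file],
                     outputs=[comment, report, btn_download])

if __name__ == "__main__":
    demo.launch()
```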
requirements.txt
CHANGED

@@ -5,4 +5,6 @@ pynrrd==1.0.0
 nibabel==5.2.1
 scikit-image==0.23.2
 morphsnakes==2.0.1
-opencv-python==4.9.0.80
+opencv-python==4.9.0.80
+llama-cpp-python==0.2.63
+transformers==4.40.0