Commit 00484e0

Fix deprecated error in active learning tutorials (#1844)
Fixes #1843

### Description

Replaces deprecated or removed APIs across the active learning tutorials: the `UNet` argument `dimensions` becomes `spatial_dims`, `np.int` becomes the built-in `int`, `torch.load` is called with `weights_only=True`, `CropForegroundd` is given `allow_smaller=True` explicitly, and image metadata is read from `unl_data["image"].meta` rather than the removed `image_meta_dict` entry. The VISTA-2D notebook also gains a pointer to the model zoo dependency notes.

### Checks

- [x] Avoid including large-size files in the PR.
- [ ] Clean up long text outputs from code cells in the notebook.
- [ ] For security purposes, please check the contents and remove any sensitive info such as user names and private keys.
- [ ] Ensure (1) hyperlinks and markdown anchors are working, (2) relative paths are used for tutorial repo files, (3) figures and graphs are in the `./figure` folder.
- [ ] Notebook runs automatically: `./runner.sh -t <path to .ipynb file>`

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
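The two migrations that dominate the transform and network setup are sketched below against a current MONAI release. This is a minimal illustration, not tutorial code: the `strides` value is an assumption, since it does not appear in this diff.

```python
import torch
from monai.networks.nets import UNet
from monai.transforms import CropForegroundd

# UNet: the `dimensions` argument was renamed to `spatial_dims`.
network = UNet(
    spatial_dims=3,  # was: dimensions=3
    in_channels=1,
    out_channels=3,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),  # assumed value; not shown in this diff
)

# CropForegroundd: pass allow_smaller=True explicitly to pin the
# long-standing cropping behaviour and silence the deprecation
# warning about the default changing.
crop = CropForegroundd(keys=["image", "label"], source_key="image",
                       allow_smaller=True)

# Quick smoke test on a dummy volume.
out = network(torch.zeros(1, 1, 96, 96, 96))
print(out.shape)  # torch.Size([1, 3, 96, 96, 96])
```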
1 parent 3722155 commit 00484e0

File tree

5 files changed, +28 -35 lines changed

active_learning/liver_tumor_al/active_learning.py

Lines changed: 9 additions & 9 deletions
```diff
@@ -155,7 +155,7 @@ def main():
     # Model Definition
     device = torch.device("cuda:0")
     network = UNet(
-        dimensions=3,
+        spatial_dims=3,
         in_channels=1,
         out_channels=3,
         channels=(16, 32, 64, 128, 256),
```
```diff
@@ -187,7 +187,7 @@ def main():
                 b_max=1.0,
                 clip=True,
             ),
-            CropForegroundd(keys=["image", "label"], source_key="image"),
+            CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True),
             SpatialPadd(keys=["image", "label"], spatial_size=(96, 96, 96)),
             RandCropByPosNegLabeld(
                 keys=["image", "label"],
```
```diff
@@ -225,7 +225,7 @@ def main():
                 b_max=1.0,
                 clip=True,
             ),
-            CropForegroundd(keys=["image", "label"], source_key="image"),
+            CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True),
             EnsureTyped(keys=["image", "label"]),
         ]
     )
```
```diff
@@ -240,7 +240,7 @@ def main():
                 mode=("bilinear"),
             ),
             ScaleIntensityRanged(keys="image", a_min=-21, a_max=189, b_min=0.0, b_max=1.0, clip=True),
-            CropForegroundd(keys=("image"), source_key="image"),
+            CropForegroundd(keys=("image"), source_key="image", allow_smaller=True),
             EnsureTyped(keys=["image"]),
         ]
     )
```
```diff
@@ -315,7 +315,7 @@ def main():
         unl_loader = DataLoader(unl_ds, batch_size=1)

         # Calculation of Epochs based on steps
-        max_epochs = np.int(args.steps / (np.ceil(len(train_d) / args.batch_size)))
+        max_epochs = int(args.steps / (np.ceil(len(train_d) / args.batch_size)))
         print("Epochs Estimated are {} for Active Iter {} with {} Vols".format(max_epochs, active_iter, len(train_d)))

         # Model Training begins for one active iteration
```
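As a worked example of the steps-to-epochs conversion in this hunk, with illustrative numbers that are not taken from the tutorial:

```python
import numpy as np

steps, n_train, batch_size = 2000, 50, 4          # hypothetical values
steps_per_epoch = np.ceil(n_train / batch_size)   # ceil(12.5) = 13.0
max_epochs = int(steps / steps_per_epoch)         # int(153.8...) = 153
print(max_epochs)
```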
```diff
@@ -393,7 +393,7 @@ def main():
         prev_best_ckpt = os.path.join(active_model_dir, "model.pt")

         device = torch.device("cuda:0")
-        ckpt = torch.load(prev_best_ckpt)
+        ckpt = torch.load(prev_best_ckpt, weights_only=True)
         network.load_state_dict(ckpt)
         network.to(device=device)
```
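For context, `weights_only=True` restricts `torch.load` to unpickling tensors and other allow-listed types rather than arbitrary Python objects, and recent PyTorch releases made it the default. A minimal save/load round trip, assuming a plain state dict and a hypothetical path (`strides` assumed as before):

```python
import torch
from monai.networks.nets import UNet

network = UNet(spatial_dims=3, in_channels=1, out_channels=3,
               channels=(16, 32, 64, 128, 256), strides=(2, 2, 2, 2))

torch.save(network.state_dict(), "model.pt")      # save only the weights
ckpt = torch.load("model.pt", weights_only=True)  # refuse arbitrary objects
network.load_state_dict(ckpt)
```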

```diff
@@ -487,16 +487,16 @@ def main():

             variance_dims = np.shape(variance)
             score_list.append(np.nanmean(variance))
-            name_list.append(unl_data["image_meta_dict"]["filename_or_obj"][0])
+            name_list.append(unl_data["image"].meta["filename_or_obj"][0])
             print(
                 "Variance for image: {} is: {}".format(
-                    unl_data["image_meta_dict"]["filename_or_obj"][0], np.nanmean(variance)
+                    unl_data["image"].meta["filename_or_obj"][0], np.nanmean(variance)
                 )
             )

             # Plot with matplotlib and save all slices
             plt.figure(1)
-            plt.imshow(np.squeeze(variance[:, :, np.int(variance_dims[2] / 2)]))
+            plt.imshow(np.squeeze(variance[:, :, int(variance_dims[2] / 2)]))
             plt.colorbar()
             plt.title("Dropout Uncertainty")
             fig_path = os.path.join(fig_base_dir, "active_{}_file_{}.png".format(active_iter, counter))
```
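The metadata change in this hunk reflects MONAI's switch to `MetaTensor`: metadata now travels on the image tensor itself via `.meta`, and the separate `image_meta_dict` entry is gone. A small sketch with a hand-built sample (in the tutorial, the `[0]` index unpacks the DataLoader batch dimension):

```python
import torch
from monai.data import MetaTensor

sample = {"image": MetaTensor(torch.zeros(1, 64, 64, 64),
                              meta={"filename_or_obj": "case_001.nii.gz"})}

# Old style (removed): sample["image_meta_dict"]["filename_or_obj"]
print(sample["image"].meta["filename_or_obj"])  # -> case_001.nii.gz
```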

active_learning/liver_tumor_al/results_uncertainty_analysis.ipynb

Lines changed: 10 additions & 20 deletions
Large diffs are not rendered by default.

active_learning/tool_tracking_al/active_learning.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -281,7 +281,7 @@ def main():
         unl_loader = DataLoader(unl_ds, batch_size=1)

         # Calculation of Epochs based on steps
-        max_epochs = np.int(args.steps / (np.ceil(len(train_d) / args.batch_size)))
+        max_epochs = int(args.steps / (np.ceil(len(train_d) / args.batch_size)))
         print("Epochs Estimated are {} for Active Iter {} with {} Vols".format(max_epochs, active_iter, len(train_d)))

         # Keep track of Best_metric, it is being used as IoU and not Dice
```
```diff
@@ -379,7 +379,7 @@ def main():
         prev_best_ckpt = os.path.join(active_model_dir, "model.pt")

         device = torch.device("cuda:0")
-        ckpt = torch.load(prev_best_ckpt)
+        ckpt = torch.load(prev_best_ckpt, weights_only=True)
         network.load_state_dict(ckpt)
         network.to(device=device)
```

active_learning/tool_tracking_al/results_uncertainty_analysis.ipynb

Lines changed: 3 additions & 3 deletions
Large diffs are not rendered by default.

vista_2d/vista_2d_tutorial_monai.ipynb

Lines changed: 4 additions & 1 deletion
```diff
@@ -37,7 +37,10 @@
     "\n",
     "The notebook demonstrates a complete pipeline for training and evaluating a cell segmentation model using the MONAI framework and the Segment Anything Model (SAM) on the Cellpose dataset. Please note we only use a small subset of the Cellpose dataset for this tutorial.\n",
     "\n",
-    "For additional information about VISTA-2D please also refer the [MONAI bundle](https://github.com/Project-MONAI/VISTA/tree/main/vista2d) and the [technical blog post](https://developer.nvidia.com/blog/advancing-cell-segmentation-and-morphology-analysis-with-nvidia-ai-foundation-model-vista-2d/)."
+    "For additional information about VISTA-2D, please also refer to the [MONAI bundle](https://github.com/Project-MONAI/VISTA/tree/main/vista2d) and the [technical blog post](https://developer.nvidia.com/blog/advancing-cell-segmentation-and-morphology-analysis-with-nvidia-ai-foundation-model-vista-2d/).\n",
+    "\n",
+    "For dependency-related issues, you can also check this note:\n",
+    "https://github.com/Project-MONAI/model-zoo/tree/dev/models/vista2d#dependencies."
 ]
 },
 {
```
