Skip to content

Commit 53a9ae4

Browse files
committed
removed py39 req / fixed small bugs
1 parent 3e29db4 commit 53a9ae4

6 files changed

Lines changed: 25 additions & 24 deletions

File tree

bioencoder/core/utils.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ def build_optim(model, optimizer_params, scheduler_params, loss_params):
324324
return {"criterion": criterion, "optimizer": optimizer, "scheduler": scheduler, "loss_optimizer": loss_optimizer}
325325

326326

327-
def compute_embeddings(loader, model):
327+
def compute_embeddings(loader, model, scaler=None):
328328
"""Computes the embeddings and corresponding labels for a dataset.
329329
330330
Parameters:
@@ -342,7 +342,11 @@ def compute_embeddings(loader, model):
342342

343343
for images, labels in loader:
344344
images = images.cuda()
345-
embed = model(images)
345+
if scaler:
346+
with torch.cuda.amp.autocast():
347+
embed = model(images)
348+
else:
349+
embed = model(images)
346350
if total_embeddings is None:
347351
total_embeddings = embed.detach().cpu()
348352
total_labels = labels.detach().cpu()
@@ -644,7 +648,8 @@ def save_augmented_sample(data_dir, transform, n_samples, seed):
644648
# Load dataset
645649
dataset = ImageFolder(root=os.path.join(data_dir, "train"))
646650
save_dir = os.path.join(data_dir, "aug_sample")
647-
shutil.rmtree(save_dir)
651+
if os.path.isdir(save_dir):
652+
shutil.rmtree(save_dir)
648653
os.makedirs(save_dir, exist_ok=True)
649654

650655
## reverse image net transforms

bioencoder/scripts/model_explorer.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,8 @@ def model_explorer(
7171

7272
## parse config
7373
backbone = hyperparams["model"]["backbone"]
74-
num_classes = hyperparams["model"]["num_classes"]
74+
num_classes = hyperparams["model"].get("num_classes", None)
7575
stage = hyperparams["model"]["stage"]
76-
device = hyperparams.get("device", "cuda")
7776

7877
## get swa path
7978
ckpt_pretrained = os.path.join(root_dir, "weights", run_name, stage, "swa")
@@ -106,18 +105,10 @@ def model_explorer(
106105
)
107106

108107
if uploaded_file is not None:
109-
110-
## load image
111-
image = Image.open(uploaded_file)
112-
113108
# Display the uploaded image
109+
image = Image.open(uploaded_file).convert('RGB')
114110
st.sidebar.image(image, caption="Input Image", use_column_width=True)
115111

116-
## apply transforms and load into device
117-
image = np.asarray(image)
118-
image = transform(image=image)["image"]
119-
image = image.unsqueeze(0).to(device)
120-
121112
# Generate visualizations
122113
selected = option_menu(None, vis_funcs, icons=['list' for _ in range(len(vis_funcs))], menu_icon="cast", orientation="horizontal")
123114
if selected == 'Filters':
@@ -154,7 +145,6 @@ def model_explorer(
154145
target = st.selectbox("Select a target", class_names)
155146
result = vis.contrast_cam(model, model.encoder,image,target_layer=[layer], target_category=class_names.index(target))
156147
st.pyplot(result)
157-
158148

159149

160150
if __name__ == "__main__":

bioencoder/vis/methods.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ def visualize_activations(model, module, img, max_acts = 64, save_path = None, d
5252
Plots the activations of a module recorded during a forward pass on an image
5353
"""
5454
model.to(device)
55-
# img_t = preprocess_image(img).to(device)
55+
img_t = preprocess_image(img).to(device)
5656
acts = [0]
5757

5858
def hook_fn(self, input, output):
@@ -87,7 +87,7 @@ def saliency_map(model, img, device = 'cuda', save_path = None):
8787

8888
model.eval()
8989
model.to(device)
90-
# img_t = preprocess_image(img).to(device)
90+
img_t = preprocess_image(img).to(device)
9191
img_t.requires_grad = True
9292
img_t.retain_grad() #added this line
9393

@@ -120,7 +120,7 @@ def grad_cam(model, module, img, target_layer = ["4"], target_category= None, de
120120
grad_cam = GradCam(model = model, feature_module = module,
121121
target_layer_names = target_layer, use_cuda = use_cuda)
122122

123-
# img_t = preprocess_image(img).to(device)
123+
img_t = preprocess_image(img).to(device)
124124

125125
grayscale_cam = grad_cam(img_t, target_category)
126126

@@ -167,7 +167,7 @@ def contrast_cam(model, module, img, target_layer = ["4"], target_category= None
167167
contrast_cam = ContrastCam(model = model, feature_module = module,
168168
target_layer_names = target_layer, use_cuda = use_cuda)
169169

170-
# img_t = preprocess_image(img).to(device)
170+
img_t = preprocess_image(img).to(device)
171171

172172
assert(target_category != None), "Please specify a target category"
173173
grayscale_cam = contrast_cam(img_t, target_category)
@@ -204,4 +204,4 @@ def contrast_cam(model, module, img, target_layer = ["4"], target_category= None
204204

205205
if save_path:
206206
fig1.savefig(save_path)
207-
return fig1
207+
return fig1

config-templates/train_stage1.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,14 +31,17 @@ criterion:
3131
params:
3232
temperature: 0.1 # Temperature parameter for contrastive loss
3333

34-
img_size: 384 # Image size for training and validation
34+
img_size: &size 384 # Image size for training and validation
3535

3636
augmentations: # augmentations to be applied - see https://github.com/agporto/BioEncoder/blob/main/help/05-options.md#augmentations
3737
sample_save: True # Whether to save a sample of augmented images
3838
sample_n: 10 # Number of augmented image samples per class to save
3939
sample_seed: 42 # Seed for random sample
4040
transforms:
4141
- RandomResizedCrop: # Randomly resize and crop the image
42+
height: *size
43+
width: *size
44+
scale: !!python/tuple [0.7,1]
4245
- Flip: # Randomly flip the image horizontally
4346
- RandomRotate90: # Randomly rotate the image by 90 degrees
4447
- MedianBlur: # Apply median blur with a probability

config-templates/train_stage2.yml

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,14 +32,17 @@ criterion:
3232
classes: 100 # Number of classes (adjust based on actual number of classes)
3333
smoothing: 0.01 # Smoothing factor for label smoothing
3434

35-
img_size: 384 # Image size for training and validation
35+
img_size: &size 384 # Image size for training and validation
3636

3737
augmentations: # augmentations to be applied - see https://github.com/agporto/BioEncoder/blob/main/help/05-options.md#augmentations
3838
sample_save: True # Whether to save a sample of augmented images
3939
sample_n: 10 # Number of augmented image samples per class to save
4040
sample_seed: 42 # Seed for random sample
41-
transforms:
41+
transforms:
4242
- RandomResizedCrop: # Randomly resize and crop the image
43+
height: *size
44+
width: *size
45+
scale: !!python/tuple [0.7,1]
4346
- Flip: # Randomly flip the image horizontally
4447
- RandomRotate90: # Randomly rotate the image by 90 degrees
4548
- MedianBlur: # Apply median blur with a probability

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ authors = [
1010
]
1111
description = "A metric learning toolkit"
1212
readme = "README.md"
13-
requires-python = "==3.9.*"
13+
requires-python = ">=3.9"
1414
keywords = ["metric learning", "biology"]
1515
dynamic = ["dependencies"]
1616
version = "0.3.1"

0 commit comments

Comments (0)