Skip to content

Commit 029425e

Browse files
authored
Merge pull request #1 from staskh/python3.11
Upgrade to PyTorch 2.3 and Python 3.11
2 parents 3ce98fb + 6690695 commit 029425e

11 files changed

Lines changed: 163 additions & 133 deletions

File tree

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,3 +127,6 @@ dmypy.json
127127

128128
# Pyre type checker
129129
.pyre/
130+
# do not save Results in Git
131+
Results/*
132+
M1_Retinal_Image_quality_EyePACS/test_outside/results_ensemble.csv

.vscode/launch.json

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
{
2+
// Use IntelliSense to learn about possible attributes.
3+
// Hover to view descriptions of existing attributes.
4+
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5+
"version": "0.2.0",
6+
"configurations": [
7+
{
8+
"name": "Python Debugger: Current File with Arguments",
9+
"type": "debugpy",
10+
"request": "launch",
11+
"program": "${file}",
12+
"console": "integratedTerminal",
13+
"cwd": "${fileDirname}",
14+
"justMyCode": true,
15+
"args": "${command:pickArgs}"
16+
},
17+
{
18+
"name": "Debug",
19+
"type": "debugpy",
20+
"request": "launch",
21+
"program": "${file}",
22+
"console": "integratedTerminal",
23+
"justMyCode": true,
24+
"cwd": "${fileDirname}"
25+
}
26+
]
27+
}

LOCAL.md

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,18 +2,18 @@
22

33
### Requirements
44

5-
1. Linux is preferred. For Windows, install [MinGW-w64](https://www.mingw-w64.org/) to use the commands below for setting up the environment.
5+
1. Linux or macOS is preferred. For Windows, install [MinGW-w64](https://www.mingw-w64.org/) to use the commands below for setting up the environment.
66
2. Anaconda or miniconda installed.
7-
3. python=3.6, cudatoolkit=11.0, torch=1.7, etc. (installation steps below)
8-
4. GPU is essential.
7+
3. python=3.11, torch=2.3, etc. (installation steps below)
8+
4. GPU is essential - NVIDIA (CUDA) or Apple Silicon (MPS).
99

1010

1111
### Package installation
1212

1313
Step 1: create virtual environment:
1414
```bash
1515
conda update conda
16-
conda create -n automorph python=3.6 -y
16+
conda create -n automorph python=3.11 -y
1717
```
1818

1919
Step 2: Activate virtual environment and clone the code.
@@ -23,16 +23,15 @@ git clone https://github.com/rmaphoh/AutoMorph.git
2323
cd AutoMorph
2424
```
2525

26-
Step 3: install pytorch 1.7 and cudatoolkit 11.0
26+
Step 3: install pytorch 2.3
2727
```bash
28-
conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch -y
28+
conda install pytorch==2.3.1 -c pytorch -y
2929
```
3030

3131
Step 4: install other packages:
3232
```bash
3333
pip install --ignore-installed certifi
3434
pip install -r requirement.txt
35-
pip install efficientnet_pytorch
3635
```
3736

3837
### Running

M0_Preprocess/fundus_prep.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def _get_radius_by_mask_center(mask,center):
7272
# radius=
7373
index=np.where(mask>0)
7474
d_int=np.sqrt((index[0]-center[0])**2+(index[1]-center[1])**2)
75-
b_count=np.bincount(np.ceil(d_int).astype(np.int))
75+
b_count=np.bincount(np.ceil(d_int).astype(int))
7676
radius=np.where(b_count>b_count.max()*0.995)[0].max()
7777
return radius
7878

@@ -129,7 +129,7 @@ def mask_image(img,mask):
129129
def remove_back_area(img,bbox=None,border=None):
130130
image=img
131131
if border is None:
132-
border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=np.int)
132+
border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=int)
133133
image=image[border[0]:border[1],border[2]:border[3],...]
134134
return image,border
135135

M1_Retinal_Image_quality_EyePACS/test_outside.py

Lines changed: 30 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -12,19 +12,19 @@
1212
import pandas as pd
1313
import torch.nn as nn
1414
from tqdm import tqdm
15-
from pycm import *
16-
import matplotlib
17-
import matplotlib.pyplot as plt
15+
#from pycm import *
16+
# import matplotlib
17+
# import matplotlib.pyplot as plt
1818
from dataset import BasicDataset_OUT
1919
from torch.utils.data import DataLoader
2020
from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl
2121

2222

23-
font = {
24-
'weight' : 'normal',
25-
'size' : 18}
26-
plt.rc('font',family='Times New Roman')
27-
matplotlib.rc('font', **font)
23+
# font = {
24+
# 'weight' : 'normal',
25+
# 'size' : 18}
26+
# plt.rc('font',family='Times New Roman')
27+
# matplotlib.rc('font', **font)
2828

2929
def test_net(model_fl_1,
3030
model_fl_2,
@@ -187,10 +187,19 @@ def get_args():
187187
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
188188
args = get_args()
189189

190-
torch.cuda.set_device(args.local_rank)
191-
device = torch.device("cuda", args.local_rank)
192-
193-
#logging.info(f'Using device {device}')
190+
191+
# Check if CUDA is available
192+
if torch.cuda.is_available():
193+
logging.info("CUDA is available. Using CUDA...")
194+
device = torch.device("cuda",args.local_rank)
195+
elif torch.backends.mps.is_available(): # Check if MPS is available (for macOS)
196+
logging.info("MPS is available. Using MPS...")
197+
device = torch.device("mps")
198+
else:
199+
logging.info("Neither CUDA nor MPS is available. Using CPU...")
200+
device = torch.device("cpu")
201+
202+
logging.info(f'Using device {device}')
194203

195204
test_dir = args.test_dir
196205
dataset=args.dataset
@@ -243,31 +252,31 @@ def get_args():
243252
model_fl_7.to(device=device)
244253
model_fl_8.to(device=device)
245254

246-
map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
255+
#map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
247256
if args.load:
248257
model_fl_1.load_state_dict(
249-
torch.load(checkpoint_path_1, map_location="cuda:0")
258+
torch.load(checkpoint_path_1, map_location=device)
250259
)
251260
model_fl_2.load_state_dict(
252-
torch.load(checkpoint_path_2, map_location="cuda:0")
261+
torch.load(checkpoint_path_2, map_location=device)
253262
)
254263
model_fl_3.load_state_dict(
255-
torch.load(checkpoint_path_3, map_location="cuda:0")
264+
torch.load(checkpoint_path_3, map_location=device)
256265
)
257266
model_fl_4.load_state_dict(
258-
torch.load(checkpoint_path_4, map_location="cuda:0")
267+
torch.load(checkpoint_path_4, map_location=device)
259268
)
260269
model_fl_5.load_state_dict(
261-
torch.load(checkpoint_path_5, map_location="cuda:0")
270+
torch.load(checkpoint_path_5, map_location=device)
262271
)
263272
model_fl_6.load_state_dict(
264-
torch.load(checkpoint_path_6, map_location="cuda:0")
273+
torch.load(checkpoint_path_6, map_location=device)
265274
)
266275
model_fl_7.load_state_dict(
267-
torch.load(checkpoint_path_7, map_location="cuda:0")
276+
torch.load(checkpoint_path_7, map_location=device)
268277
)
269278
model_fl_8.load_state_dict(
270-
torch.load(checkpoint_path_8, map_location="cuda:0")
279+
torch.load(checkpoint_path_8, map_location=device)
271280
)
272281

273282
# faster convolutions, but more memory

M2_Artery_vein/test_outside.py

Lines changed: 36 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -280,8 +280,18 @@ def get_args():
280280

281281
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
282282
args = get_args()
283-
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
284-
#logging.info(f'Using device {device}')
283+
# Check if CUDA is available
284+
if torch.cuda.is_available():
285+
logging.info("CUDA is available. Using CUDA...")
286+
device = torch.device("cuda:0")
287+
elif torch.backends.mps.is_available(): # Check if MPS is available (for macOS)
288+
logging.info("MPS is available. Using MPS...")
289+
device = torch.device("mps")
290+
else:
291+
logging.info("Neither CUDA nor MPS is available. Using CPU...")
292+
device = torch.device("cpu")
293+
294+
logging.info(f'Using device {device}')
285295

286296
img_size = Define_image_size(args.uniform, args.dataset)
287297
dataset_name = args.dataset
@@ -347,79 +357,79 @@ def get_args():
347357

348358

349359
for i in range(1):
350-
net_G_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_all.pth'))
351-
net_G_A_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_A.pth'))
352-
net_G_V_1.load_state_dict(torch.load(checkpoint_saved_1 + 'CP_best_F1_V.pth'))
360+
net_G_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_all.pth',map_location=device))
361+
net_G_A_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_A.pth',map_location=device))
362+
net_G_V_1.load_state_dict(torch.load(checkpoint_saved_1 + 'CP_best_F1_V.pth',map_location=device))
353363
net_G_1.eval()
354364
net_G_A_1.eval()
355365
net_G_V_1.eval()
356366
net_G_1.to(device=device)
357367
net_G_A_1.to(device=device)
358368
net_G_V_1.to(device=device)
359369

360-
net_G_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_all.pth'))
361-
net_G_A_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_A.pth'))
362-
net_G_V_2.load_state_dict(torch.load(checkpoint_saved_2 + 'CP_best_F1_V.pth'))
370+
net_G_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_all.pth',map_location=device))
371+
net_G_A_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_A.pth',map_location=device))
372+
net_G_V_2.load_state_dict(torch.load(checkpoint_saved_2 + 'CP_best_F1_V.pth',map_location=device))
363373
net_G_2.eval()
364374
net_G_A_2.eval()
365375
net_G_V_2.eval()
366376
net_G_2.to(device=device)
367377
net_G_A_2.to(device=device)
368378
net_G_V_2.to(device=device)
369379

370-
net_G_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_all.pth'))
371-
net_G_A_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_A.pth'))
372-
net_G_V_3.load_state_dict(torch.load(checkpoint_saved_3 + 'CP_best_F1_V.pth'))
380+
net_G_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_all.pth',map_location=device))
381+
net_G_A_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_A.pth',map_location=device))
382+
net_G_V_3.load_state_dict(torch.load(checkpoint_saved_3 + 'CP_best_F1_V.pth',map_location=device))
373383
net_G_3.eval()
374384
net_G_A_3.eval()
375385
net_G_V_3.eval()
376386
net_G_3.to(device=device)
377387
net_G_A_3.to(device=device)
378388
net_G_V_3.to(device=device)
379389

380-
net_G_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_all.pth'))
381-
net_G_A_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_A.pth'))
382-
net_G_V_4.load_state_dict(torch.load(checkpoint_saved_4 + 'CP_best_F1_V.pth'))
390+
net_G_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_all.pth',map_location=device))
391+
net_G_A_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_A.pth',map_location=device))
392+
net_G_V_4.load_state_dict(torch.load(checkpoint_saved_4 + 'CP_best_F1_V.pth',map_location=device))
383393
net_G_4.eval()
384394
net_G_A_4.eval()
385395
net_G_V_4.eval()
386396
net_G_4.to(device=device)
387397
net_G_A_4.to(device=device)
388398
net_G_V_4.to(device=device)
389399

390-
net_G_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_all.pth'))
391-
net_G_A_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_A.pth'))
392-
net_G_V_5.load_state_dict(torch.load(checkpoint_saved_5 + 'CP_best_F1_V.pth'))
400+
net_G_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_all.pth',map_location=device))
401+
net_G_A_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_A.pth',map_location=device))
402+
net_G_V_5.load_state_dict(torch.load(checkpoint_saved_5 + 'CP_best_F1_V.pth',map_location=device))
393403
net_G_5.eval()
394404
net_G_A_5.eval()
395405
net_G_V_5.eval()
396406
net_G_5.to(device=device)
397407
net_G_A_5.to(device=device)
398408
net_G_V_5.to(device=device)
399409

400-
net_G_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_all.pth'))
401-
net_G_A_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_A.pth'))
402-
net_G_V_6.load_state_dict(torch.load(checkpoint_saved_6 + 'CP_best_F1_V.pth'))
410+
net_G_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_all.pth',map_location=device))
411+
net_G_A_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_A.pth',map_location=device))
412+
net_G_V_6.load_state_dict(torch.load(checkpoint_saved_6 + 'CP_best_F1_V.pth',map_location=device))
403413
net_G_6.eval()
404414
net_G_A_6.eval()
405415
net_G_V_6.eval()
406416
net_G_6.to(device=device)
407417
net_G_A_6.to(device=device)
408418
net_G_V_6.to(device=device)
409419

410-
net_G_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_all.pth'))
411-
net_G_A_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_A.pth'))
412-
net_G_V_7.load_state_dict(torch.load(checkpoint_saved_7 + 'CP_best_F1_V.pth'))
420+
net_G_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_all.pth',map_location=device))
421+
net_G_A_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_A.pth',map_location=device))
422+
net_G_V_7.load_state_dict(torch.load(checkpoint_saved_7 + 'CP_best_F1_V.pth',map_location=device))
413423
net_G_7.eval()
414424
net_G_A_7.eval()
415425
net_G_V_7.eval()
416426
net_G_7.to(device=device)
417427
net_G_A_7.to(device=device)
418428
net_G_V_7.to(device=device)
419429

420-
net_G_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_all.pth'))
421-
net_G_A_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_A.pth'))
422-
net_G_V_8.load_state_dict(torch.load(checkpoint_saved_8 + 'CP_best_F1_V.pth'))
430+
net_G_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_all.pth',map_location=device))
431+
net_G_A_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_A.pth',map_location=device))
432+
net_G_V_8.load_state_dict(torch.load(checkpoint_saved_8 + 'CP_best_F1_V.pth',map_location=device))
423433
net_G_8.eval()
424434
net_G_A_8.eval()
425435
net_G_V_8.eval()

M2_Vessel_seg/test_outside_integrated.py

Lines changed: 22 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -203,34 +203,34 @@ def test_net(data_path, batch_size, device, dataset_train, dataset_test, image_s
203203
net_10 = Segmenter(input_channels=3, n_filters = 32, n_classes=1, bilinear=False)
204204

205205

206-
net_1.load_state_dict(torch.load(dir_checkpoint_1 + 'G_best_F1_epoch.pth'))
206+
net_1.load_state_dict(torch.load(dir_checkpoint_1 + 'G_best_F1_epoch.pth',map_location=device))
207207
net_1.eval()
208208
net_1.to(device=device)
209-
net_2.load_state_dict(torch.load(dir_checkpoint_2 + 'G_best_F1_epoch.pth'))
209+
net_2.load_state_dict(torch.load(dir_checkpoint_2 + 'G_best_F1_epoch.pth',map_location=device))
210210
net_2.eval()
211211
net_2.to(device=device)
212-
net_3.load_state_dict(torch.load(dir_checkpoint_3 + 'G_best_F1_epoch.pth'))
212+
net_3.load_state_dict(torch.load(dir_checkpoint_3 + 'G_best_F1_epoch.pth',map_location=device))
213213
net_3.eval()
214214
net_3.to(device=device)
215-
net_4.load_state_dict(torch.load(dir_checkpoint_4 + 'G_best_F1_epoch.pth'))
215+
net_4.load_state_dict(torch.load(dir_checkpoint_4 + 'G_best_F1_epoch.pth',map_location=device))
216216
net_4.eval()
217217
net_4.to(device=device)
218-
net_5.load_state_dict(torch.load(dir_checkpoint_5 + 'G_best_F1_epoch.pth'))
218+
net_5.load_state_dict(torch.load(dir_checkpoint_5 + 'G_best_F1_epoch.pth',map_location=device))
219219
net_5.eval()
220220
net_5.to(device=device)
221-
net_6.load_state_dict(torch.load(dir_checkpoint_6 + 'G_best_F1_epoch.pth'))
221+
net_6.load_state_dict(torch.load(dir_checkpoint_6 + 'G_best_F1_epoch.pth',map_location=device))
222222
net_6.eval()
223223
net_6.to(device=device)
224-
net_7.load_state_dict(torch.load(dir_checkpoint_7 + 'G_best_F1_epoch.pth'))
224+
net_7.load_state_dict(torch.load(dir_checkpoint_7 + 'G_best_F1_epoch.pth',map_location=device))
225225
net_7.eval()
226226
net_7.to(device=device)
227-
net_8.load_state_dict(torch.load(dir_checkpoint_8 + 'G_best_F1_epoch.pth'))
227+
net_8.load_state_dict(torch.load(dir_checkpoint_8 + 'G_best_F1_epoch.pth',map_location=device))
228228
net_8.eval()
229229
net_8.to(device=device)
230-
net_9.load_state_dict(torch.load(dir_checkpoint_9 + 'G_best_F1_epoch.pth'))
230+
net_9.load_state_dict(torch.load(dir_checkpoint_9 + 'G_best_F1_epoch.pth',map_location=device))
231231
net_9.eval()
232232
net_9.to(device=device)
233-
net_10.load_state_dict(torch.load(dir_checkpoint_10 + 'G_best_F1_epoch.pth'))
233+
net_10.load_state_dict(torch.load(dir_checkpoint_10 + 'G_best_F1_epoch.pth',map_location=device))
234234
net_10.eval()
235235
net_10.to(device=device)
236236

@@ -285,9 +285,20 @@ def get_args():
285285

286286
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
287287
args = get_args()
288-
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
288+
# Check if CUDA is available
289+
if torch.cuda.is_available():
290+
logging.info("CUDA is available. Using CUDA...")
291+
device = torch.device("cuda:0")
292+
elif torch.backends.mps.is_available(): # Check if MPS is available (for macOS)
293+
logging.info("MPS is available. Using MPS...")
294+
device = torch.device("mps")
295+
else:
296+
logging.info("Neither CUDA nor MPS is available. Using CPU...")
297+
device = torch.device("cpu")
298+
289299
logging.info(f'Using device {device}')
290300

301+
291302
image_size = Define_image_size(args.uniform, args.dataset)
292303
lr = args.lr
293304

0 commit comments

Comments
 (0)