Tanusree88 committed on
Commit
2fd06f4
1 Parent(s): 065f8a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -23
app.py CHANGED
@@ -2,8 +2,7 @@ import os
2
  import zipfile
3
  import numpy as np
4
  import torch
5
- from transformers import ViTForImageClassification
6
- from torch.optim import AdamW
7
  from PIL import Image
8
  from torch.utils.data import Dataset, DataLoader
9
  import streamlit as st
@@ -17,14 +16,12 @@ def extract_zip(zip_file, extract_to):
17
  def preprocess_image(image_path):
18
  ext = os.path.splitext(image_path)[-1].lower()
19
 
20
- if ext in ['.npy']:
21
  image_data = np.load(image_path)
22
  image_tensor = torch.tensor(image_data).float()
23
- if len(image_tensor.shape) == 2: # If the image is 2D (grayscale)
24
- image_tensor = image_tensor.unsqueeze(0) # Add channel dimension
25
- elif len(image_tensor.shape) == 3: # If the image is 3D (height, width, channels)
26
- image_tensor = image_tensor.permute(2, 0, 1).float() # Change to (C, H, W)
27
-
28
  elif ext in ['.jpg', '.jpeg']:
29
  img = Image.open(image_path).convert('RGB').resize((224, 224))
30
  img_np = np.array(img)
@@ -38,7 +35,6 @@ def preprocess_image(image_path):
38
 
39
  # Prepare dataset
40
  def prepare_dataset(extracted_folder):
41
- # Ensure the path exists
42
  neuronii_path = os.path.join(extracted_folder, "neuroniiimages")
43
 
44
  if not os.path.exists(neuronii_path):
@@ -50,11 +46,9 @@ def prepare_dataset(extracted_folder):
50
  for disease_folder in ['alzheimers_dataset', 'parkinsons_dataset', 'MSjpg']:
51
  folder_path = os.path.join(neuronii_path, disease_folder)
52
 
53
- # Check if the subfolder exists
54
  if not os.path.exists(folder_path):
55
  print(f"Folder not found: {folder_path}")
56
- continue # Skip this folder if it's not found
57
-
58
  label = {'alzheimers_dataset': 0, 'parkinsons_dataset': 1, 'MSjpg': 2}[disease_folder]
59
 
60
  for img_file in os.listdir(folder_path):
@@ -63,7 +57,6 @@ def prepare_dataset(extracted_folder):
63
  labels.append(label)
64
  else:
65
  print(f"Unsupported file: {img_file}")
66
-
67
  print(f"Total images loaded: {len(image_paths)}")
68
  return image_paths, labels
69
 
@@ -81,12 +74,11 @@ class CustomImageDataset(Dataset):
81
  label = self.labels[idx]
82
  return image, label
83
 
84
- # Training function
85
- # Training function
86
- def fine_tune_model(train_loader):
87
- model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', num_labels=3)
88
  model.train()
89
- optimizer = AdamW(model.parameters(), lr=1e-4) # Use PyTorch's AdamW
90
  criterion = torch.nn.CrossEntropyLoss()
91
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
92
  model.to(device)
@@ -104,16 +96,15 @@ def fine_tune_model(train_loader):
104
  return running_loss / len(train_loader)
105
 
106
  # Streamlit UI for Fine-tuning
107
- st.title("Fine-tune ViT on MRI/CT Scans for MS & Neurodegenerative Diseases")
108
 
109
- # Provide the correct zip file URL
110
  zip_file_url = "https://huggingface.co/spaces/Tanusree88/ViT-MRI-FineTuning/resolve/main/neuroniiimages.zip"
111
 
112
  if st.button("Start Training"):
113
  extraction_dir = "extracted_files"
114
  os.makedirs(extraction_dir, exist_ok=True)
115
 
116
- # Download the zip file (this is a placeholder; use requests or any other method to download the zip file)
117
  zip_file = "neuroniiimages.zip" # Assuming you downloaded it with this name
118
 
119
  # Extract zip file
@@ -124,6 +115,37 @@ if st.button("Start Training"):
124
  dataset = CustomImageDataset(image_paths, labels)
125
  train_loader = DataLoader(dataset, batch_size=32, shuffle=True)
126
 
127
- # Fine-tune the model
128
- final_loss = fine_tune_model(train_loader)
129
  st.write(f"Training Complete with Final Loss: {final_loss}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import zipfile
3
  import numpy as np
4
  import torch
5
+ from transformers import SegformerForImageSegmentation, ResNetForImageClassification, AdamW
 
6
  from PIL import Image
7
  from torch.utils.data import Dataset, DataLoader
8
  import streamlit as st
 
16
  def preprocess_image(image_path):
17
  ext = os.path.splitext(image_path)[-1].lower()
18
 
19
+ if ext == '.npy':
20
  image_data = np.load(image_path)
21
  image_tensor = torch.tensor(image_data).float()
22
+ if len(image_tensor.shape) == 3:
23
+ image_tensor = image_tensor.unsqueeze(0)
24
+
 
 
25
  elif ext in ['.jpg', '.jpeg']:
26
  img = Image.open(image_path).convert('RGB').resize((224, 224))
27
  img_np = np.array(img)
 
35
 
36
  # Prepare dataset
37
  def prepare_dataset(extracted_folder):
 
38
  neuronii_path = os.path.join(extracted_folder, "neuroniiimages")
39
 
40
  if not os.path.exists(neuronii_path):
 
46
  for disease_folder in ['alzheimers_dataset', 'parkinsons_dataset', 'MSjpg']:
47
  folder_path = os.path.join(neuronii_path, disease_folder)
48
 
 
49
  if not os.path.exists(folder_path):
50
  print(f"Folder not found: {folder_path}")
51
+ continue
 
52
  label = {'alzheimers_dataset': 0, 'parkinsons_dataset': 1, 'MSjpg': 2}[disease_folder]
53
 
54
  for img_file in os.listdir(folder_path):
 
57
  labels.append(label)
58
  else:
59
  print(f"Unsupported file: {img_file}")
 
60
  print(f"Total images loaded: {len(image_paths)}")
61
  return image_paths, labels
62
 
 
74
  label = self.labels[idx]
75
  return image, label
76
 
77
+ # Training function for classification
78
+ def fine_tune_classification_model(train_loader):
79
+ model = ResNetForImageClassification.from_pretrained('microsoft/resnet-50', num_labels=3)
 
80
  model.train()
81
+ optimizer = AdamW(model.parameters(), lr=1e-4)
82
  criterion = torch.nn.CrossEntropyLoss()
83
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
84
  model.to(device)
 
96
  return running_loss / len(train_loader)
97
 
98
  # Streamlit UI for Fine-tuning
99
+ st.title("Fine-tune ResNet for MRI/CT Scans Classification")
100
 
 
101
  zip_file_url = "https://huggingface.co/spaces/Tanusree88/ViT-MRI-FineTuning/resolve/main/neuroniiimages.zip"
102
 
103
  if st.button("Start Training"):
104
  extraction_dir = "extracted_files"
105
  os.makedirs(extraction_dir, exist_ok=True)
106
 
107
+ # Download the zip file (placeholder)
108
  zip_file = "neuroniiimages.zip" # Assuming you downloaded it with this name
109
 
110
  # Extract zip file
 
115
  dataset = CustomImageDataset(image_paths, labels)
116
  train_loader = DataLoader(dataset, batch_size=32, shuffle=True)
117
 
118
+ # Fine-tune the classification model
119
+ final_loss = fine_tune_classification_model(train_loader)
120
  st.write(f"Training Complete with Final Loss: {final_loss}")
121
+
122
# Segmentation fine-tuning (SegFormer backbone)
def fine_tune_segmentation_model(train_loader):
    """Fine-tune a SegFormer model on the loaded scans and return the mean loss.

    Args:
        train_loader: DataLoader yielding ``(images, labels)`` batches, where
            ``images`` are float tensors and ``labels`` are per-image class
            indices (0–2).

    Returns:
        float: average training loss over the final epoch.
    """
    # BUG FIX: the original imported `SegformerForImageSegmentation` from the
    # checkpoint 'nvidia/segformer-b0' — neither exists in `transformers`.
    # The real class is `SegformerForSemanticSegmentation` and 'nvidia/mit-b0'
    # is the pretrained SegFormer-B0 encoder. Imported locally so this fix
    # does not depend on the (broken) top-of-file transformers import.
    from transformers import SegformerForSemanticSegmentation

    model = SegformerForSemanticSegmentation.from_pretrained('nvidia/mit-b0', num_labels=3)
    model.train()
    # `AdamW` was removed from `transformers`; use PyTorch's implementation,
    # consistent with the classification trainer in this file.
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    criterion = torch.nn.CrossEntropyLoss()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    running_loss = 0.0
    for epoch in range(10):
        running_loss = 0.0
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            # Segmentation logits are (B, num_labels, H/4, W/4), but the
            # dataset only supplies image-level labels. The original passed
            # the 4-D logits straight to CrossEntropyLoss with 1-D targets,
            # which raises at runtime; pool spatially to per-image logits.
            logits = model(pixel_values=images).logits
            pooled = logits.mean(dim=(2, 3))  # -> (B, num_labels)
            loss = criterion(pooled, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
    # NOTE(review): an empty loader makes len(train_loader) == 0 and this
    # divides by zero — callers should ensure the dataset is non-empty.
    return running_loss / len(train_loader)
142
+
143
# Add a button for segmentation training
if st.button("Start Segmentation Training"):
    # BUG FIX: the original reused `dataset` from the classification button's
    # branch, but Streamlit re-executes the script per interaction and only
    # one button is truthy per run — so `dataset` was undefined here
    # (NameError). Rebuild the dataset from the same zip instead.
    extraction_dir = "extracted_files"
    os.makedirs(extraction_dir, exist_ok=True)
    extract_zip("neuroniiimages.zip", extraction_dir)

    image_paths, labels = prepare_dataset(extraction_dir)
    seg_dataset = CustomImageDataset(image_paths, labels)
    seg_train_loader = DataLoader(seg_dataset, batch_size=32, shuffle=True)

    # Fine-tune the segmentation model
    final_loss_seg = fine_tune_segmentation_model(seg_train_loader)
    st.write(f"Segmentation Training Complete with Final Loss: {final_loss_seg}")
151
+