diff --git a/mlmodel/evaluation.py b/mlmodel/Version1/evaluation.py
similarity index 85%
rename from mlmodel/evaluation.py
rename to mlmodel/Version1/evaluation.py
index 8d309fcee2a11555e24061e55ed2014e88775972..55ef648ff6828eb4e468bed960af27b7bacefd03 100644
--- a/mlmodel/evaluation.py
+++ b/mlmodel/Version1/evaluation.py
@@ -2,23 +2,30 @@ from joblib import load
 from sklearn.metrics import accuracy_score, classification_report
 import numpy as np
 
+# Define function to evaluate model
 def evaluate_model(model_path, preprocessed_data_file):
     
+    # Load model
     model = load(model_path)
     
+    # Load preprocessed data
     preprocessed_data = np.load(preprocessed_data_file)
     test_images = preprocessed_data['test_images']
     test_labels = preprocessed_data['test_labels']
     
+    # Reshape images to 2D
     if len(test_images.shape) > 2:
         num_samples, height, width, channels = test_images.shape
         test_images = test_images.reshape(num_samples, height * width * channels)
     
+    # Predict
     predicted_labels = model.predict(test_images)
     
+    # Check accuracy
     accuracy = accuracy_score(test_labels, predicted_labels)
     print(f"Model Accuracy: {accuracy:.2f}")
     
+    # Print classification report
     report = classification_report(test_labels, predicted_labels)
     print("Classification Report:")
     print(report)
diff --git a/mlmodel/main.py b/mlmodel/Version1/main.py
similarity index 77%
rename from mlmodel/main.py
rename to mlmodel/Version1/main.py
index bd2e8cec8e715cc66723bf8d31e4b21b34069340..25e4f0c8c1baa1f68ea060dbf2806d7714e0ee91 100644
--- a/mlmodel/main.py
+++ b/mlmodel/Version1/main.py
@@ -1,13 +1,19 @@
 from preprocessing import preprocess_images, split_data, save_preprocessed_data
 from training import train_model
 
+# Set directory for images
 dataset_dir = r'Cars Dataset\train'
+
+# Preprocess images
 images, labels = preprocess_images(dataset_dir)
 
+# Split images into training and testing
 train_images, test_images, train_labels, test_labels = split_data(images, labels)
 
+# Save the preprocessed data
 output_file = 'preprocessed_data.npz'
 save_preprocessed_data(train_images, test_images, train_labels, test_labels, output_file)
 
+# Train the model
 model = train_model(train_images, train_labels)
 
diff --git a/mlmodel/preprocessing.py b/mlmodel/Version1/preprocessing.py
similarity index 96%
rename from mlmodel/preprocessing.py
rename to mlmodel/Version1/preprocessing.py
index ce695479bf0c3eaa9a9406608080a718760d29e8..4e427eb83bf7218abe49b550d68d8eb88d107624 100644
--- a/mlmodel/preprocessing.py
+++ b/mlmodel/Version1/preprocessing.py
@@ -4,6 +4,7 @@ import numpy as np
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler
 
+# Function to preprocess images
 def preprocess_images(dataset_dir, image_size=(224, 224)):
     images = []
     labels = []
@@ -28,6 +29,7 @@ def preprocess_images(dataset_dir, image_size=(224, 224)):
     
     return images, labels
 
+# Function to reshape images
 def preprocess_images_2d(images):
     # Reshape the images into 2D arrays
     num_samples, height, width, channels = images.shape
diff --git a/mlmodel/training.py b/mlmodel/Version1/training.py
similarity index 100%
rename from mlmodel/training.py
rename to mlmodel/Version1/training.py
diff --git a/mlmodel/Version2/categorization.py b/mlmodel/Version2/categorization.py
index 6edd44b34a89d599bf6a9a4b4dd75021c7a54420..deb97112ff918bfda786b80e96048176f663f266 100644
--- a/mlmodel/Version2/categorization.py
+++ b/mlmodel/Version2/categorization.py
@@ -1,34 +1,3 @@
-# import os
-# import shutil
-
-# # Set the directory where your images are stored
-# source_dir = 'Car Model Training'
-
-# # Set the directory where you want to organize the folders by category
-# target_dir = 'categories'
-
-# # Check if the target directory exists, if not, create it
-# if not os.path.exists(target_dir):
-#     os.mkdir(target_dir)
-
-# # Loop through each file in the source directory
-# for filename in os.listdir(source_dir):
-#     if filename.endswith((".png", ".jpg", ".jpeg")):  # Check for image files
-#         # Assuming the category is the first three parts of the filename (Make_Model_Year)
-#         category_parts = filename.split('_')[:3]
-#         category = '_'.join(category_parts)  # This will join them as 'Make_Model_Year'
-
-#         # Create a new folder path for the category if it doesn't exist
-#         category_path = os.path.join(target_dir, category)
-#         if not os.path.exists(category_path):
-#             os.mkdir(category_path)
-        
-#         # Move the file
-#         shutil.move(os.path.join(source_dir, filename), os.path.join(category_path, filename))
-
-# print("Images have been organized into category folders.")
-
-
 import os
 import shutil
 
diff --git a/mlmodel/Version2/cnn_training.py b/mlmodel/Version2/cnn_training.py
index 9f2d5cf804cc7ec959cdcab2370454eb5c29444b..9c68d16e5466be3a1a61c9d451e7dfb4b5953213 100644
--- a/mlmodel/Version2/cnn_training.py
+++ b/mlmodel/Version2/cnn_training.py
@@ -4,6 +4,7 @@ from tensorflow.keras.models import Sequential
 from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
 from tensorflow.keras.preprocessing.image import ImageDataGenerator
 
+# Define CNN model
 def create_cnn_model(input_shape, num_classes):
     model = Sequential([
         Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
@@ -19,6 +20,7 @@ def create_cnn_model(input_shape, num_classes):
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
     return model
 
+# Define function to train the model
 def train_cnn_model(train_images, train_labels):
     # Assume train_images and train_labels are already provided and preprocessed
 
diff --git a/mlmodel/Version2/evaluation.py b/mlmodel/Version2/evaluation.py
index 8f758dab5d057efa2389ab322edb5eb1a09248e7..5d724b0ffc42be1db4edbc11d0f3632656ea6adc 100644
--- a/mlmodel/Version2/evaluation.py
+++ b/mlmodel/Version2/evaluation.py
@@ -2,12 +2,14 @@ from tensorflow.keras.models import load_model
 from tensorflow.keras.preprocessing.image import ImageDataGenerator
 import numpy as np
 
+# Define function to evaluate model
 def evaluate_model(model_path, preprocessed_data_file):
     model = load_model(model_path)
     preprocessed_data = np.load(preprocessed_data_file)
     test_images = preprocessed_data['test_images'] / 255.0
     test_labels = preprocessed_data['test_labels']
 
+    # Evaluate model using the test images and labels
     results = model.evaluate(test_images, test_labels)
     print(f"Test Loss, Test Accuracy: {results}")
 
diff --git a/mlmodel/Version2/preprocessing.py b/mlmodel/Version2/preprocessing.py
index 0eb1ad69954b98b29cafa85b680f0387aed83aa7..e5eefa5c4c3f0f9fffd27c69318355b14416fe98 100644
--- a/mlmodel/Version2/preprocessing.py
+++ b/mlmodel/Version2/preprocessing.py
@@ -4,6 +4,7 @@ import numpy as np
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler
 
+# Define function to preprocess images
 def preprocess_images(dataset_dir, image_size=(224, 224)):
     images = []
     labels = []
@@ -32,17 +33,6 @@ def preprocess_images(dataset_dir, image_size=(224, 224)):
     
     return images, labels
 
-# def preprocess_images_2d(images):
-#     # Reshape the images into 2D arrays
-#     num_samples, height, width, channels = images.shape
-#     images_2d = images.reshape(num_samples, height * width * channels)
-    
-#     # Standardize the pixel values
-#     scaler = StandardScaler()
-#     images_2d_scaled = scaler.fit_transform(images_2d)
-    
-#     return images_2d_scaled
-
 # Split the dataset for training 
 def split_data(images, labels, test_size=0.2):
     return train_test_split(images, labels, test_size=test_size)
diff --git a/mlmodel/Version3/eval.py b/mlmodel/Version3/eval.py
index e547659e1372e547ef7cda504cbae8b8011afa2c..e0bebe1d823af46955b0dda8d77a361531ed931b 100644
--- a/mlmodel/Version3/eval.py
+++ b/mlmodel/Version3/eval.py
@@ -1,11 +1,11 @@
 import tensorflow as tf 
-from tensorflow.keras.models import Model
+import numpy as np
 import cv2
-
+from tensorflow.keras.models import Model
 from tensorflow.keras.preprocessing import image
 from tensorflow.keras.preprocessing.image import load_img, img_to_array
-import numpy as np
 
+# Car brands
 categories = ['Acura', 'Alfa Romeo', 'Aston Martin', 'Audi', 'Bentley']
 
 # Load the model
diff --git a/mlmodel/Version3/preprocess.py b/mlmodel/Version3/preprocess.py
index a793d38da41f2bb4e880419c357a9d418932a6e6..3c3d2bfaa3828f80e0a3da562cc249708f9af663 100644
--- a/mlmodel/Version3/preprocess.py
+++ b/mlmodel/Version3/preprocess.py
@@ -2,6 +2,7 @@ import os
 import shutil
 from sklearn.model_selection import train_test_split
 
+# Function to split data
 def split_data(source_folder, train_size=0.7, val_size=0.15, test_size=0.15):
     # Specify the car brands to include
     included_brands = ['Audi', 'Bentley', 'Acura', 'Aston Martin', 'Alfa Romeo']
diff --git a/mlmodel/resnet_model5.h5 b/mlmodel/Version3/resnet_model5.h5
similarity index 100%
rename from mlmodel/resnet_model5.h5
rename to mlmodel/Version3/resnet_model5.h5