diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
index a14deba5d125c2fa73f773a2fc6f1023c26aa791..36a4b5fd235a254602f97bb77f8260022e538b49 100644
--- a/.idea/dataSources.xml
+++ b/.idea/dataSources.xml
@@ -8,5 +8,12 @@
       <jdbc-url>jdbc:mysql://localhost:3306/prediction_db</jdbc-url>
       <working-dir>$ProjectFileDir$</working-dir>
     </data-source>
+    <data-source source="LOCAL" name="prediction_db@localhost [2]" uuid="17632b9a-f71c-4fff-92d0-61931eed2c39">
+      <driver-ref>mysql.8</driver-ref>
+      <synchronize>true</synchronize>
+      <jdbc-driver>com.mysql.cj.jdbc.Driver</jdbc-driver>
+      <jdbc-url>jdbc:mysql://localhost:3306/prediction_db</jdbc-url>
+      <working-dir>$ProjectFileDir$</working-dir>
+    </data-source>
   </component>
 </project>
\ No newline at end of file
diff --git a/mlaas/__pycache__/__init__.cpython-310.pyc b/mlaas/__pycache__/__init__.cpython-310.pyc
index 5f9f80acf89f24c86d2630c27ca57abd24f33742..9f0fedc1fda0b8b2d47ac0822b44c09467c3ce37 100644
Binary files a/mlaas/__pycache__/__init__.cpython-310.pyc and b/mlaas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mlaas/__pycache__/settings.cpython-310.pyc b/mlaas/__pycache__/settings.cpython-310.pyc
index 808a682e145f50eb85610c1519b97d6f3228123c..6f152b685e98d14ebd68ff2f44f9ab4e87e75e91 100644
Binary files a/mlaas/__pycache__/settings.cpython-310.pyc and b/mlaas/__pycache__/settings.cpython-310.pyc differ
diff --git a/mlaas/__pycache__/urls.cpython-310.pyc b/mlaas/__pycache__/urls.cpython-310.pyc
index d1901d71e5ab6b931fda5a92544cc0134df35512..ef9eab83ca1ae7e46bcef6ea27ed18e2fceef22f 100644
Binary files a/mlaas/__pycache__/urls.cpython-310.pyc and b/mlaas/__pycache__/urls.cpython-310.pyc differ
diff --git a/mlaas/__pycache__/wsgi.cpython-310.pyc b/mlaas/__pycache__/wsgi.cpython-310.pyc
index db38895afcdeb1b87e1860f6c5e52cfbe3bfdd04..70f4a29e89a284ebe7e900b1ac4c2c67ab445efb 100644
Binary files a/mlaas/__pycache__/wsgi.cpython-310.pyc and b/mlaas/__pycache__/wsgi.cpython-310.pyc differ
diff --git a/mlaas/middleware/__pycache__/__init__.cpython-310.pyc b/mlaas/middleware/__pycache__/__init__.cpython-310.pyc
index bcbe742706f165151f7f033dc3481d34f0d83b4c..a80be63977ad5910e7c209186ab3d8ba89ac2f71 100644
Binary files a/mlaas/middleware/__pycache__/__init__.cpython-310.pyc and b/mlaas/middleware/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mlaas/middleware/__pycache__/ai_engineer_auth.cpython-310.pyc b/mlaas/middleware/__pycache__/ai_engineer_auth.cpython-310.pyc
index 8e8eca8dd83ab58ce1a5b19f41ada518b0cab366..45823ddc49c942a525e50e2efe8ae2b7a7945231 100644
Binary files a/mlaas/middleware/__pycache__/ai_engineer_auth.cpython-310.pyc and b/mlaas/middleware/__pycache__/ai_engineer_auth.cpython-310.pyc differ
diff --git a/mlaas/middleware/__pycache__/autologout.cpython-310.pyc b/mlaas/middleware/__pycache__/autologout.cpython-310.pyc
index 196236e92d012bf7c390d3ed49b859bb8bd7c743..65727ca390f13df45cba3317cf634599334efbf6 100644
Binary files a/mlaas/middleware/__pycache__/autologout.cpython-310.pyc and b/mlaas/middleware/__pycache__/autologout.cpython-310.pyc differ
diff --git a/mlmodel/Version2/categorization.py b/mlmodel/Version2/categorization.py
new file mode 100644
index 0000000000000000000000000000000000000000..6edd44b34a89d599bf6a9a4b4dd75021c7a54420
--- /dev/null
+++ b/mlmodel/Version2/categorization.py
@@ -0,0 +1,60 @@
+# import os
+# import shutil
+
+# # Set the directory where your images are stored
+# source_dir = 'Car Model Training'
+
+# # Set the directory where you want to organize the folders by category
+# target_dir = 'categories'
+
+# # Check if the target directory exists, if not, create it
+# if not os.path.exists(target_dir):
+#     os.mkdir(target_dir)
+
+# # Loop through each file in the source directory
+# for filename in os.listdir(source_dir):
+#     if filename.endswith((".png", ".jpg", ".jpeg")):  # Check for image files
+#         # Assuming the category is the first three parts of the filename (Make_Model_Year)
+#         category_parts = filename.split('_')[:3]
+#         category = '_'.join(category_parts)  # This will join them as 'Make_Model_Year'
+
+#         # Create a new folder path for the category if it doesn't exist
+#         category_path = os.path.join(target_dir, category)
+#         if not os.path.exists(category_path):
+#             os.mkdir(category_path)
+        
+#         # Move the file
+#         shutil.move(os.path.join(source_dir, filename), os.path.join(category_path, filename))
+
+# print("Images have been organized into category folders.")
+
+
+import os
+import shutil
+
+# Set the directory where your images are stored
+source_dir = 'Car Model Training'
+
+# Set the directory where you want to organize the folders by car make
+target_dir = 'categories2'
+
+# Create the target directory if it doesn't already exist
+if not os.path.exists(target_dir):
+    os.mkdir(target_dir)
+
+# Loop through each file in the source directory
+for filename in os.listdir(source_dir):
+    if filename.endswith((".png", ".jpg", ".jpeg")):  # Check for image files
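+        # (note: endswith is case-sensitive, so files like ".JPG" would be skipped)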
+        # Assuming the make is the first part of the filename split by underscore
+        make = filename.split('_')[0]
+
+        # Create a new folder path for the make if it doesn't exist
+        make_path = os.path.join(target_dir, make)
+        if not os.path.exists(make_path):
+            os.mkdir(make_path)
+        
+        # Move the file
+        shutil.move(os.path.join(source_dir, filename), os.path.join(make_path, filename))
+
+print("Images have been organized into folders by car make.")
diff --git a/mlmodel/Version2/cnn_training.py b/mlmodel/Version2/cnn_training.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f2d5cf804cc7ec959cdcab2370454eb5c29444b
--- /dev/null
+++ b/mlmodel/Version2/cnn_training.py
@@ -0,0 +1,54 @@
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+def create_cnn_model(input_shape, num_classes):
+    model = Sequential([
+        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
+        MaxPooling2D((2, 2)),
+        Conv2D(64, (3, 3), activation='relu'),
+        MaxPooling2D((2, 2)),
+        Conv2D(64, (3, 3), activation='relu'),
+        Flatten(),
+        Dense(64, activation='relu'),
+        Dropout(0.5),
+        Dense(num_classes, activation='softmax')
+    ])
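+    # sparse_categorical_crossentropy expects integer class labels, which
+    # preprocess_images() provides by encoding the class folder names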
+    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+    return model
+
+def train_cnn_model(train_images, train_labels, test_images, test_labels):
+    # Assume the arrays are already preprocessed and the labels integer-encoded
+
+    # Get the input shape and number of classes from the data
+    input_shape = train_images[0].shape
+    num_classes = len(np.unique(train_labels))
+
+    # Create the CNN model
+    model = create_cnn_model(input_shape, num_classes)
+
+    # Data augmentation (no rescale here: the images were already scaled
+    # to [0, 1] in preprocessing)
+    train_datagen = ImageDataGenerator(
+        rotation_range=20,
+        width_shift_range=0.2,
+        height_shift_range=0.2,
+        shear_range=0.2,
+        zoom_range=0.2,
+        horizontal_flip=True,
+        fill_mode='nearest'
+    )
+    
+    train_generator = train_datagen.flow(train_images, train_labels, batch_size=32)
+
+    # Train the model
+    history = model.fit(train_generator, epochs=10, validation_data=(test_images, test_labels))
+    
+    # Save the model
+    model.save('cnn_model.h5')
+
+    return history
diff --git a/mlmodel/Version2/evaluation.py b/mlmodel/Version2/evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f758dab5d057efa2389ab322edb5eb1a09248e7
--- /dev/null
+++ b/mlmodel/Version2/evaluation.py
@@ -0,0 +1,17 @@
+from tensorflow.keras.models import load_model
+import numpy as np
+
+def evaluate_model(model_path, preprocessed_data_file):
+    model = load_model(model_path)
+    preprocessed_data = np.load(preprocessed_data_file)
+    test_images = preprocessed_data['test_images']  # already scaled to [0, 1] in preprocessing
+    test_labels = preprocessed_data['test_labels']
+
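+    # model.evaluate returns [loss, accuracy], matching the compile-time metrics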
+    results = model.evaluate(test_images, test_labels)
+    print(f"Test Loss, Test Accuracy: {results}")
+
+if __name__ == "__main__":
+    model_path = 'cnn_model.h5'
+    preprocessed_data_file = 'preprocessed_data.npz'
+    evaluate_model(model_path, preprocessed_data_file)
diff --git a/mlmodel/Version2/main.py b/mlmodel/Version2/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..752aed5c6514ba33105a2ef0466bb7823630935d
--- /dev/null
+++ b/mlmodel/Version2/main.py
@@ -0,0 +1,21 @@
+from preprocessing import preprocess_images, split_data, save_preprocessed_data
+from cnn_training import train_cnn_model
+
+# Configuration for the dataset directory
+dataset_dir = r'categories2'
+
+# Preprocess the images
+images, labels = preprocess_images(dataset_dir)
+
+# Split the data into training and testing sets
+train_images, test_images, train_labels, test_labels = split_data(images, labels)
+
+# Save the preprocessed data
+output_file = 'preprocessed_data.npz'
+save_preprocessed_data(train_images, test_images, train_labels, test_labels, output_file)
+
+# Train the model (train_cnn_model also saves it to cnn_model.h5 and returns the training history)
+history = train_cnn_model(train_images, train_labels, test_images, test_labels)
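+
+# To check the saved model afterwards, run evaluation.py, which loads
+# cnn_model.h5 and the preprocessed_data.npz file written above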
diff --git a/mlmodel/Version2/preprocessing.py b/mlmodel/Version2/preprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eb1ad69954b98b29cafa85b680f0387aed83aa7
--- /dev/null
+++ b/mlmodel/Version2/preprocessing.py
@@ -0,0 +1,57 @@
+import os
+import cv2
+import numpy as np
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import StandardScaler
+
+def preprocess_images(dataset_dir, image_size=(224, 224)):
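+    # Expects dataset_dir to contain one subfolder per class (here, one per
+    # car make), with that class's images inside it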
+    images = []
+    labels = []
+    
+    for label in os.listdir(dataset_dir):
+        class_dir = os.path.join(dataset_dir, label)
+        
+        for image_file in os.listdir(class_dir):
+            image_path = os.path.join(class_dir, image_file)
+            
+            # Read the image
+            image = cv2.imread(image_path)
+            if image is None:
+                print(f"Failed to load image at {image_path}. Skipping...")
+                continue
+            
+            # Resize the image
+            image = cv2.resize(image, image_size)
+            image = image.astype('float32') / 255.0
+            images.append(image)
+            labels.append(label)
+    
+    # Convert the lists to numpy arrays; encode the string folder names as
+    # integer class indices so they work with sparse_categorical_crossentropy
+    images = np.array(images)
+    class_names = sorted(set(labels))
+    labels = np.array([class_names.index(name) for name in labels])
+    
+    return images, labels
+
+# def preprocess_images_2d(images):
+#     # Reshape the images into 2D arrays
+#     num_samples, height, width, channels = images.shape
+#     images_2d = images.reshape(num_samples, height * width * channels)
+    
+#     # Standardize the pixel values
+#     scaler = StandardScaler()
+#     images_2d_scaled = scaler.fit_transform(images_2d)
+    
+#     return images_2d_scaled
+
+# Split the dataset for training 
+def split_data(images, labels, test_size=0.2):
+    # Stratify so each class keeps the same proportion in the train and test sets
+    return train_test_split(images, labels, test_size=test_size, stratify=labels, random_state=42)
+
+# Save the preprocessed data
+def save_preprocessed_data(train_images, test_images, train_labels, test_labels, output_file):
+    np.savez(output_file, train_images=train_images, test_images=test_images, train_labels=train_labels, test_labels=test_labels)
\ No newline at end of file
diff --git a/mlmodel/Version3/classification.py b/mlmodel/Version3/classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..79b90e5a74bbf8c8338de0bfc9b30324146c8055
--- /dev/null
+++ b/mlmodel/Version3/classification.py
@@ -0,0 +1,42 @@
+import os
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.preprocessing.image import load_img, img_to_array
+from sklearn.metrics import classification_report
+
+# Categories
+categories = ['Acura', 'Alfa Romeo', 'Aston Martin', 'Audi', 'Bentley']
+
+# Load the model
+model = tf.keras.models.load_model('resnet_model4.h5')
+
+# Function to prepare image
+def prepare_image(path_for_image):
+    img = load_img(path_for_image, target_size=(224, 224))
+    img = img_to_array(img)
+    img = np.expand_dims(img, axis=0)
+    img /= 255
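+    # dividing by 255 matches the rescale=1./255 used when training the model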
+    return img
+
+# Load test images and labels
+test_dir = 'C:/Users/DELL/Desktop/DESD_AI_Training/Version2/test/'
+test_images = []
+true_labels = []
+
+for category in categories:
+    category_dir = os.path.join(test_dir, category)
+    for img_name in os.listdir(category_dir):
+        img_path = os.path.join(category_dir, img_name)
+        test_images.append(prepare_image(img_path))
+        true_labels.append(category)
+
+# Make predictions (stack the images into a single batch for one predict call)
+test_batch = np.vstack(test_images)
+predictions = model.predict(test_batch)
+predicted_labels = [categories[np.argmax(pred)] for pred in predictions]
+
+# Print classification report
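+# (classification_report sorts string labels alphabetically, and the categories
+# list above is already in that order, so target_names line up correctly)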
+print(classification_report(true_labels, predicted_labels, target_names=categories))
diff --git a/mlmodel/Version3/eval.py b/mlmodel/Version3/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..e547659e1372e547ef7cda504cbae8b8011afa2c
--- /dev/null
+++ b/mlmodel/Version3/eval.py
@@ -0,0 +1,36 @@
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.preprocessing.image import load_img, img_to_array
+
+categories = ['Acura', 'Alfa Romeo', 'Aston Martin', 'Audi', 'Bentley']
+
+# Load the model and print its architecture (summary() prints directly)
+model = tf.keras.models.load_model('resnet_model5.h5')
+model.summary()
+
+# Prepare the image the same way as at training time
+def prepare_image(path_for_image):
+    img = load_img(path_for_image, target_size=(224, 224))
+    image_result = img_to_array(img)
+    image_result = np.expand_dims(image_result, axis=0)
+    image_result = image_result / 255
+    return image_result
+
+# Make a prediction
+test_image = "C:/Users/DELL/Desktop/DESD_AI_Training/Version2/test/Audi/Audi_test2.jpg"
+
+image_for_model = prepare_image(test_image)
+result_array = model.predict(image_for_model, verbose=1)
+answer = np.argmax(result_array, axis=1)
+
+# Convert results to percentages
+percentage_results = result_array[0] * 100
+
+# Print results in a more readable format
+print("Prediction percentages per category:")
+for category, percentage in zip(categories, percentage_results):
+    print(f"{category}: {percentage:.2f}%")
+
+index = answer[0]
+
+print("The car is: "+ categories[index])
\ No newline at end of file
diff --git a/mlmodel/Version3/preprocess.py b/mlmodel/Version3/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..a793d38da41f2bb4e880419c357a9d418932a6e6
--- /dev/null
+++ b/mlmodel/Version3/preprocess.py
@@ -0,0 +1,39 @@
+import os
+import shutil
+from sklearn.model_selection import train_test_split
+
+def split_data(source_folder, train_size=0.7, val_size=0.15, test_size=0.15):
+    # Specify the car brands to include
+    included_brands = ['Audi', 'Bentley', 'Acura', 'Aston Martin', 'Alfa Romeo']
+    
+    # Create directories for the train, validation, and test splits
+    for name in ['train', 'val', 'test']:
+        os.makedirs(os.path.join(source_folder, name), exist_ok=True)
+
+    # Loop through each specified class subfolder
+    for brand in included_brands:
+        class_folder = os.path.join(source_folder, brand)
+        
+        if os.path.isdir(class_folder):  # Check if it's a directory
+            # Get all filenames in the class folder (assumed to all be images)
+            images = [os.path.join(class_folder, f) for f in os.listdir(class_folder)]
+            
+            # Split data into train and temp (temp will further be split into val and test)
+            train_files, temp_files = train_test_split(images, train_size=train_size, test_size=val_size+test_size, random_state=42)
+            val_files, test_files = train_test_split(temp_files, train_size=val_size/(val_size+test_size), test_size=test_size/(val_size+test_size), random_state=42)
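+            # e.g. with 0.7/0.15/0.15, temp_files holds 30% of the images and is
+            # split in half (0.15 / 0.30) into equal val and test sets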
+
+            # Function to copy files to a target directory
+            def copy_files(files, target_dir):
+                os.makedirs(target_dir, exist_ok=True)
+                for f in files:
+                    shutil.copy(f, os.path.join(target_dir, os.path.basename(f)))
+            
+            # Copy files to respective directories
+            copy_files(train_files, os.path.join(source_folder, 'train', brand))
+            copy_files(val_files, os.path.join(source_folder, 'val', brand))
+            copy_files(test_files, os.path.join(source_folder, 'test', brand))
+
+# Example usage
+split_data('imgs', train_size=0.7, val_size=0.15, test_size=0.15)
diff --git a/mlmodel/Version3/resnet_model.h5 b/mlmodel/Version3/resnet_model.h5
new file mode 100644
index 0000000000000000000000000000000000000000..0e37cb8b4ab4af00d93de6ec474b72df3ed230a7
Binary files /dev/null and b/mlmodel/Version3/resnet_model.h5 differ
diff --git a/mlmodel/Version3/resnet_model.py b/mlmodel/Version3/resnet_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..4efe31ac1594ef3c696fe9a911b5204a71604c85
--- /dev/null
+++ b/mlmodel/Version3/resnet_model.py
@@ -0,0 +1,125 @@
+# from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
+# from tensorflow.keras.models import Model
+# from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
+# from tensorflow.keras.preprocessing import image
+# from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
+# from tensorflow.keras.models import Sequential
+# import numpy as np
+# from glob import glob
+
+
+# IMAGE_SIZE = [224, 224]
+
+# train_folder = "train"
+# validation_folder = "val"
+
+# ResNet = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
+
+# print(ResNet.summary())
+
+# for layer in ResNet.layers:
+#     layer.trainable = False
+
+# # Classes
+# Classes = glob(train_folder + '/*')
+# print(Classes)
+
+# classes_num = len(Classes)
+# print(classes_num)
+
+# # Next layers of the model
+
+# # Flatten the output of the ResNet50 model
+# plus_flatten_layer = Flatten()(ResNet.output)
+
+# # Dense layer with classes 
+# prediction = Dense(classes_num, activation='softmax')(plus_flatten_layer)
+
+# # Create a model object 
+# model = Model(inputs=ResNet.input, outputs=prediction)
+
+# print(model.summary())
+
+# # Compile the model
+# model.compile(
+#     loss='categorical_crossentropy',
+#     optimizer='adam',
+#     metrics=['accuracy']
+# )
+
+# # image augmentation
+
+# from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+# train_datagen = ImageDataGenerator(rescale=1./255,
+#                                    shear_range=0.2,
+#                                    zoom_range=0.2,
+#                                    horizontal_flip=True)
+
+# test_datagen = ImageDataGenerator(rescale=1. /255)
+
+# training_set = train_datagen.flow_from_directory(train_folder, target_size=(224, 224), batch_size=32, class_mode='categorical')
+# test_set = test_datagen.flow_from_directory(validation_folder, target_size=(224, 224), batch_size=32, class_mode='categorical')
+
+# steps_per_epoch = training_set.samples // training_set.batch_size
+# validation_steps = test_set.samples // test_set.batch_size
+
+# # Fit the model
+# result = model.fit(training_set,
+#                    validation_data=test_set,
+#                    epochs=60,
+#                    steps_per_epoch=steps_per_epoch,
+#                    validation_steps=validation_steps)
+
+# # Save the model
+# model.save('resnet_model3.h5')
+
+
+
+from tensorflow.keras.layers import Dense, Flatten
+from tensorflow.keras.models import Model
+from tensorflow.keras.applications.resnet50 import ResNet50
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from glob import glob
+
+# Image size and paths setup
+IMAGE_SIZE = [224, 224]
+train_folder = "train"
+validation_folder = "val"
+
+# Load the ResNet50 model with pretrained ImageNet weights
+ResNet = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
+
+# Set all layers to non-trainable
+for layer in ResNet.layers:
+    layer.trainable = False
+
+# Determine number of classes dynamically
+Classes = glob(train_folder + '/*')
+classes_num = len(Classes)
+
+# Adding a Flatten and Dense layer to the model
+plus_flatten_layer = Flatten()(ResNet.output)
+prediction = Dense(classes_num, activation='softmax')(plus_flatten_layer)
+
+# Create the complete model
+model = Model(inputs=ResNet.input, outputs=prediction)
+model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
+
+# Setting up data generators
+train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
+test_datagen = ImageDataGenerator(rescale=1./255)
+
+training_set = train_datagen.flow_from_directory(train_folder, target_size=(224, 224), batch_size=32, class_mode='categorical')
+test_set = test_datagen.flow_from_directory(validation_folder, target_size=(224, 224), batch_size=32, class_mode='categorical')
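+# flow_from_directory assigns class indices in alphabetical folder-name order,
+# which is why eval.py and classification.py list the categories alphabetically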
+
+# steps_per_epoch = training_set.samples // training_set.batch_size
+# validation_steps = test_set.samples // test_set.batch_size
+
+# Training the model
+result = model.fit(training_set, validation_data=test_set, epochs=60)
+
+# Save the model
+model.save('resnet_model5.h5')
diff --git a/mlmodel/Version3/resnet_model2.h5 b/mlmodel/Version3/resnet_model2.h5
new file mode 100644
index 0000000000000000000000000000000000000000..19b5d065dbd4c3d6c040c48b6dbf4d4ca57fa20f
Binary files /dev/null and b/mlmodel/Version3/resnet_model2.h5 differ
diff --git a/mlmodel/Version3/resnet_model3.h5 b/mlmodel/Version3/resnet_model3.h5
new file mode 100644
index 0000000000000000000000000000000000000000..379e4ebc59550a99ea2c38dabfeb1656e8795c4c
Binary files /dev/null and b/mlmodel/Version3/resnet_model3.h5 differ
diff --git a/mlmodel/Version3/resnet_model4.h5 b/mlmodel/Version3/resnet_model4.h5
new file mode 100644
index 0000000000000000000000000000000000000000..9249b0bbe74ef06fa4cc694c0a8fad9883d1bcf3
Binary files /dev/null and b/mlmodel/Version3/resnet_model4.h5 differ
diff --git a/mlmodel/Version3/resnet_model5.h5 b/mlmodel/Version3/resnet_model5.h5
new file mode 100644
index 0000000000000000000000000000000000000000..b038bafdbed728a15653a1371de95c5e5647602f
Binary files /dev/null and b/mlmodel/Version3/resnet_model5.h5 differ
diff --git a/prediction_service/__pycache__/__init__.cpython-310.pyc b/prediction_service/__pycache__/__init__.cpython-310.pyc
index c94643b1202d4d4057431f19b2317d57848efd62..df9b7359cecf6be1e8ae77a54cb1634a6d22fb53 100644
Binary files a/prediction_service/__pycache__/__init__.cpython-310.pyc and b/prediction_service/__pycache__/__init__.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/admin.cpython-310.pyc b/prediction_service/__pycache__/admin.cpython-310.pyc
index 3d0aa399fc21517c1e247f49738d44faa3cf2c1c..f6551e6972cf72a0d6127a7f434be789c664307f 100644
Binary files a/prediction_service/__pycache__/admin.cpython-310.pyc and b/prediction_service/__pycache__/admin.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/apps.cpython-310.pyc b/prediction_service/__pycache__/apps.cpython-310.pyc
index e40330c1af2ec3385920d41c872e75c0c57939cd..c1f4c9c51ebd3fad48dd469801b13007ce497049 100644
Binary files a/prediction_service/__pycache__/apps.cpython-310.pyc and b/prediction_service/__pycache__/apps.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/forms.cpython-310.pyc b/prediction_service/__pycache__/forms.cpython-310.pyc
index 652e31d73c46f97533467bb7939be82466183871..190616c0f68ba086659eab2f324dd4067026408c 100644
Binary files a/prediction_service/__pycache__/forms.cpython-310.pyc and b/prediction_service/__pycache__/forms.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/models.cpython-310.pyc b/prediction_service/__pycache__/models.cpython-310.pyc
index fb33c32ae56b34b9ce9a294a115e9424d05195bd..c4c54c42f7521f4556e19fd3de23d7c67fed8198 100644
Binary files a/prediction_service/__pycache__/models.cpython-310.pyc and b/prediction_service/__pycache__/models.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/urls.cpython-310.pyc b/prediction_service/__pycache__/urls.cpython-310.pyc
index 406367e661d8f47678e9558094ac176faa799321..786e153a3fe304dd5d42013c2f692a1db2a6311c 100644
Binary files a/prediction_service/__pycache__/urls.cpython-310.pyc and b/prediction_service/__pycache__/urls.cpython-310.pyc differ
diff --git a/prediction_service/__pycache__/views.cpython-310.pyc b/prediction_service/__pycache__/views.cpython-310.pyc
index e9aa17e3345c700c626306adbe7ced8b8388f99c..74b7ee27f814bc038df0e0cc011cab0046f32f13 100644
Binary files a/prediction_service/__pycache__/views.cpython-310.pyc and b/prediction_service/__pycache__/views.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0001_initial.cpython-310.pyc b/prediction_service/migrations/__pycache__/0001_initial.cpython-310.pyc
index 9aa0439f8eba863c159d8a535b9b9954d5858878..42488715c8d7c57cdfb23c813c4dd69a63aa412b 100644
Binary files a/prediction_service/migrations/__pycache__/0001_initial.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0001_initial.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0002_remove_aiengineer_trained_models_and_more.cpython-310.pyc b/prediction_service/migrations/__pycache__/0002_remove_aiengineer_trained_models_and_more.cpython-310.pyc
index 5e419840ba17e246de8a82027a70aaceeee25484..5388c92c55b6da9493e5760368ebbb8bfa5ade48 100644
Binary files a/prediction_service/migrations/__pycache__/0002_remove_aiengineer_trained_models_and_more.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0002_remove_aiengineer_trained_models_and_more.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0003_post.cpython-310.pyc b/prediction_service/migrations/__pycache__/0003_post.cpython-310.pyc
index e3a6267611c5d63465d80b26fd07327deea0fc9f..6560a8c4cea30243888184d2a9180735d231588c 100644
Binary files a/prediction_service/migrations/__pycache__/0003_post.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0003_post.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0004_alter_userprofile_role.cpython-310.pyc b/prediction_service/migrations/__pycache__/0004_alter_userprofile_role.cpython-310.pyc
index fe6e8a2e38e9cf77f98402ffc57f0ad352d9e4f0..9ad9acaff84bfa620c29c81228e4e93e5956a34e 100644
Binary files a/prediction_service/migrations/__pycache__/0004_alter_userprofile_role.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0004_alter_userprofile_role.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0005_alter_post_id_delete_financeteam.cpython-310.pyc b/prediction_service/migrations/__pycache__/0005_alter_post_id_delete_financeteam.cpython-310.pyc
index a5b26f142235ea9facc86c8e169568620fad9ea2..7f01c9ebcfe6265154aac954fdc3bb71170fc670 100644
Binary files a/prediction_service/migrations/__pycache__/0005_alter_post_id_delete_financeteam.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0005_alter_post_id_delete_financeteam.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0006_mlmodel.cpython-310.pyc b/prediction_service/migrations/__pycache__/0006_mlmodel.cpython-310.pyc
index bacd82fd32abafc4179758965f95db8e7a203c46..2bae6b2cabdd138ee938f7c7e45d1e267363da43 100644
Binary files a/prediction_service/migrations/__pycache__/0006_mlmodel.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0006_mlmodel.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0007_post_mlmodel.cpython-310.pyc b/prediction_service/migrations/__pycache__/0007_post_mlmodel.cpython-310.pyc
index 60e56c987d571c764790bf3e927f1fc2364666de..41f47774ed95c98c1a95a5c297f47bb17d38ece4 100644
Binary files a/prediction_service/migrations/__pycache__/0007_post_mlmodel.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0007_post_mlmodel.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0008_post_slug.cpython-310.pyc b/prediction_service/migrations/__pycache__/0008_post_slug.cpython-310.pyc
index 269df00d15384796a285a6073ff7316546c3c935..6b0b8f53df8f3f268cd0781607619b3913310773 100644
Binary files a/prediction_service/migrations/__pycache__/0008_post_slug.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0008_post_slug.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/0009_aiengineer_is_authorized.cpython-310.pyc b/prediction_service/migrations/__pycache__/0009_aiengineer_is_authorized.cpython-310.pyc
index 32dead685fc5ed858048143e5f4290b3228bde7d..09036d11139adc6b6bf3a5609008d688f13d9f0c 100644
Binary files a/prediction_service/migrations/__pycache__/0009_aiengineer_is_authorized.cpython-310.pyc and b/prediction_service/migrations/__pycache__/0009_aiengineer_is_authorized.cpython-310.pyc differ
diff --git a/prediction_service/migrations/__pycache__/__init__.cpython-310.pyc b/prediction_service/migrations/__pycache__/__init__.cpython-310.pyc
index 530685db3e01649a821eceb29b96c5bbafbf48e4..135afc293d590cfddf6ca6b18c20878b6da9cd7d 100644
Binary files a/prediction_service/migrations/__pycache__/__init__.cpython-310.pyc and b/prediction_service/migrations/__pycache__/__init__.cpython-310.pyc differ