diff --git a/myproject/Dockerfile b/myproject/Dockerfile
index 5c0c8d92ec646f39341d138960e27fa663e2295a..1163b105836c4fa1a59730e54830ee46465cd226 100644
--- a/myproject/Dockerfile
+++ b/myproject/Dockerfile
@@ -31,6 +31,13 @@ WORKDIR /usr/src/app
 COPY ./requirements.txt .
 RUN pip install --upgrade pip && pip install -r requirements.txt
 
+# Install libsndfile, the system library librosa/soundfile need to decode audio.
+# Doing the update and install in a single RUN keeps the apt cache from going
+# stale between layers, and removing the package lists keeps the image small.
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends libsndfile1 && \
+    rm -rf /var/lib/apt/lists/*
+
 # Copy project files from the builder stage and the current directory
 COPY --from=builder /usr/src/app/myapp/static ./myapp/static
 COPY . .
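
Note: libsndfile is the native library that soundfile (and therefore librosa.load) relies on to decode WAV files, so it has to come from apt rather than pip. A quick way to confirm the dependency is usable inside the container is a one-off check like the sketch below; it assumes soundfile is available via requirements.txt (it is pulled in by librosa) and that some sample.wav exists to decode.

```python
# Minimal sanity check (run inside the container) that libsndfile is usable.
# Assumes soundfile is installed and a sample.wav file is present.
import soundfile as sf

try:
    data, sample_rate = sf.read("sample.wav")
    print(f"Decoded {len(data)} frames at {sample_rate} Hz")
except RuntimeError as exc:
    # soundfile raises RuntimeError when libsndfile cannot decode the file
    print(f"libsndfile is present but decoding failed: {exc}")
```
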
diff --git a/myproject/docker-compose.yml b/myproject/docker-compose.yml
index 84ac46fcb7746ef1f31bc090b71aa5ad58bdb5ca..05b4b15a6e4684bf309bc974bc598506e342a3ab 100644
--- a/myproject/docker-compose.yml
+++ b/myproject/docker-compose.yml
@@ -20,6 +20,7 @@ services:
       - static_volume:/usr/src/app/static
     depends_on:
       - db
+      - tensorflow_serving
     environment:
       DJANGO_SECRET_KEY: ${DJANGO_SECRET_KEY}
       DEBUG: ${DEBUG}
diff --git a/myproject/myapp/audio_preprocessing.py b/myproject/myapp/audio_preprocessing.py
index 36f38561b42745eed3daf385cf86d1c6e6664ad7..a6c6a633e02947a24c416698b575310ff6d53e0c 100644
--- a/myproject/myapp/audio_preprocessing.py
+++ b/myproject/myapp/audio_preprocessing.py
@@ -28,36 +28,36 @@ def preprocess_audio_for_inference(audio_path):
         preprocessed_windows.append(mel_db_resized)
     return preprocessed_windows
 
-# Preprocess your audio file
-audio_path = './static/src/media/80_Gm_LofiPiano_02_823.wav'  # Update this path
-preprocessed_data = preprocess_audio_for_inference(audio_path)
-
-# print(f"Number of windows: {len(preprocessed_data)}")
-# print(f"Value array: {preprocessed_data[0]}")
-
-# Write preprocessed data values to a text file
-# with open('G53_data.txt', 'w') as file:
-#     for window in preprocessed_data:
-#         for value in window.flatten():
-#             file.write(str(value) + '\n')
-# print("Preprocessed data values written to preprocessed_data.txt")
-
-
-# TensorFlow Serving URL
-url = 'http://localhost:8501/v1/models/instrument_model/versions/2:predict'
-
-# Prepare data for TensorFlow Serving
-data = json.dumps({"signature_name": "serving_default", "instances": [window.tolist() for window in preprocessed_data]})
-
-# Send request
-headers = {"Content-Type": "application/json"}
-response = requests.post(url, data=data, headers=headers)
-
-# Process response
-if response.status_code == 200:
-    predictions = response.json()['predictions']
-    # Process your predictions as needed
-    print(predictions)
-else:
-    print(f"Failed to get predictions, status code: {response.status_code}, response text: {response.text}")
+# # Preprocess your audio file
+# audio_path = './static/src/media/80_Gm_LofiPiano_02_823.wav'  # Update this path
+# preprocessed_data = preprocess_audio_for_inference(audio_path)
+
+# # print(f"Number of windows: {len(preprocessed_data)}")
+# # print(f"Value array: {preprocessed_data[0]}")
+
+# # Write preprocessed data values to a text file
+# # with open('G53_data.txt', 'w') as file:
+# #     for window in preprocessed_data:
+# #         for value in window.flatten():
+# #             file.write(str(value) + '\n')
+# # print("Preprocessed data values written to preprocessed_data.txt")
+
+
+# # TensorFlow Serving URL
+# url = 'http://localhost:8501/v1/models/instrument_model/versions/2:predict'
+
+# # Prepare data for TensorFlow Serving
+# data = json.dumps({"signature_name": "serving_default", "instances": [window.tolist() for window in preprocessed_data]})
+
+# # Send request
+# headers = {"Content-Type": "application/json"}
+# response = requests.post(url, data=data, headers=headers)
+
+# # Process response
+# if response.status_code == 200:
+#     predictions = response.json()['predictions']
+#     # Process your predictions as needed
+#     print(predictions)
+# else:
+#     print(f"Failed to get predictions, status code: {response.status_code}, response text: {response.text}")
 
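
Note: commenting out the module-level script stops it from executing when views.py imports preprocess_audio_for_inference, which is the immediate goal. An alternative that keeps the ad-hoc test runnable is to move it behind a `__main__` guard; a rough sketch of that approach, reusing the audio path and TensorFlow Serving URL from the original script, is:

```python
# Sketch: keep the ad-hoc test runnable without executing it on import.
# Reuses the audio path and TensorFlow Serving URL from the original script.
import json

import requests

if __name__ == "__main__":
    audio_path = './static/src/media/80_Gm_LofiPiano_02_823.wav'
    preprocessed_data = preprocess_audio_for_inference(audio_path)

    url = 'http://localhost:8501/v1/models/instrument_model/versions/2:predict'
    payload = json.dumps({
        "signature_name": "serving_default",
        "instances": [window.tolist() for window in preprocessed_data],
    })
    response = requests.post(url, data=payload,
                             headers={"Content-Type": "application/json"})

    if response.status_code == 200:
        print(response.json()['predictions'])
    else:
        print(f"Failed to get predictions: {response.status_code} {response.text}")
```
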
diff --git a/myproject/myapp/serializers.py b/myproject/myapp/serializers.py
new file mode 100644
index 0000000000000000000000000000000000000000..6622e0001d24942875d2b58bb2f36a57d9f812df
--- /dev/null
+++ b/myproject/myapp/serializers.py
@@ -0,0 +1,4 @@
+from rest_framework import serializers
+
+class InstrumentDetectionSerializer(serializers.Serializer):
+    audio_file = serializers.FileField()
\ No newline at end of file
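
Note: the serializer currently accepts any uploaded file. If stricter input checking is wanted later, DRF field-level validation can be added; a possible extension (not part of this change — the .wav-only rule and 10 MB cap are assumptions) might look like:

```python
from rest_framework import serializers


class InstrumentDetectionSerializer(serializers.Serializer):
    audio_file = serializers.FileField()

    def validate_audio_file(self, value):
        # Assumed constraints: WAV input only, capped at 10 MB.
        if not value.name.lower().endswith('.wav'):
            raise serializers.ValidationError('Only .wav files are supported.')
        if value.size > 10 * 1024 * 1024:
            raise serializers.ValidationError('Audio file must be 10 MB or smaller.')
        return value
```
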
diff --git a/myproject/myapp/templates/index1.html b/myproject/myapp/templates/index1.html
index c0044f2d18170a5e7eb76ce0fd7fb3c646433677..db04949e1d2a937f992abe928c21330c2c815937 100644
--- a/myproject/myapp/templates/index1.html
+++ b/myproject/myapp/templates/index1.html
@@ -23,10 +23,9 @@
       <form enctype="multipart/form-data" method="post" id="uploadForm">
         {% csrf_token %}
         {{ form.audio_file }}
-        <button type="reset" class="text-gray-800 bg-white hover:bg-gray-100 focus:outline-none focus:ring-4 focus:ring-gray-300 font-medium rounded-lg text-sm px-5 py-2.5 me-2 mb-2 dark:bg-gray-300 dark:hover:bg-gray-400 dark:focus:ring-gray-500 dark:border-gray-500">
-          Clear
-        </button>
-        <button type="submit" class="text-white bg-gray-800 hover:bg-gray-900 focus:outline-none focus:ring-4 focus:ring-gray-300 font-medium rounded-lg text-sm px-5 py-2.5 me-2 mb-2 dark:bg-gray-800 dark:hover:bg-gray-700 dark:focus:ring-gray-700 dark:border-gray-700">
+        <a href="/" class="text-gray-800 bg-white hover:bg-gray-100 focus:outline-none focus:ring-4 focus:ring-gray-300 font-medium rounded-lg text-sm px-5 py-2.5 me-2 mb-2 dark:bg-gray-300 dark:hover:bg-gray-400 dark:focus:ring-gray-500 dark:border-gray-500">
+          Clear</a>
+        <button type="submit" id="runAlgorithmButton"  class="text-white bg-gray-800 hover:bg-gray-900 focus:outline-none focus:ring-4 focus:ring-gray-300 font-medium rounded-lg text-sm px-5 py-2.5 me-2 mb-2 dark:bg-gray-800 dark:hover:bg-gray-700 dark:focus:ring-gray-700 dark:border-gray-700">
           Run Algorithm
         </button>
       </form>
@@ -36,44 +35,49 @@
     <div id="player" class="py-8 px-4 mx-auto max-w-screen-xl lg:py-8 hidden">
       <div id="waveform" class="w-full h-32 m-4"></div>
       <button id="playButton" class="text-white bg-gray-800 hover:bg-gray-900 focus:outline-none focus:ring-4 focus:ring-gray-300 font-medium rounded-lg text-sm px-5 py-2.5 me-2 mb-2 dark:bg-gray-800 dark:hover:bg-gray-700 dark:focus:ring-gray-700 dark:border-gray-700 w-full" disabled>Play</button>
-  </div>
+    </div>
+    
+    
+    {% if predictions %}
+    <div id="predictions" class="py-8 px-4 mx-auto max-w-screen-xl">
+      <h3 class="text-2xl font-bold mb-4">{{ file_name }} Predictions:</h3>
+      <ul id="predictionList" class="space-y-2">
+        {% for prediction in predictions %}
+          <li class="bg-gray-100 dark:bg-gray-800 px-4 py-2 rounded-md" style="white-space: pre-line;">{{ prediction|safe }}</li>
+        {% endfor %}
+      </ul>
+    </div>
+    {% endif %}
+  </section>
+
+
   <script>
-      var wavesurfer = WaveSurfer.create({
-          container: '#waveform',
-          waveColor: 'gray',
-          progressColor: '#f9f1f1'
-      });
-      function loadAudioFile(event) {
+
+function loadAudioFile(event) {
         var file = event.target.files[0];
-        var reader = new FileReader();
-        reader.onload = function(e) {
+        if (file) {
             wavesurfer.loadBlob(file);
             document.getElementById('player').classList.remove('hidden');
-            submitForm();  // Submit the form
+            wavesurfer.on('ready', function () {
+                document.getElementById('playButton').disabled = false;
+            });
         }
-        reader.readAsDataURL(file);
     }
-      wavesurfer.on('ready', function () {
-          document.getElementById('playButton').disabled = false;
-      });
-      document.getElementById('playButton').addEventListener('click', function () {
-          wavesurfer.playPause();
-          this.textContent = wavesurfer.isPlaying() ? 'Stop' : 'Play';
-      });
-      wavesurfer.on('play', function () {
-          document.getElementById('playButton').textContent = 'Stop';
-      });
-      wavesurfer.on('pause', function () {
-          document.getElementById('playButton').textContent = 'Play';
-      });
-      function submitForm() {
+
+    function submitForm() {
         var form = document.getElementById('uploadForm');
         var formData = new FormData(form);
-        var xhr = new XMLHttpRequest();
-        xhr.open('POST', '/uploading_file/', true);
-        xhr.setRequestHeader('X-CSRFToken', getCookie('csrftoken'));  // Include the CSRF token
-        xhr.send(formData);
+        formData.append('audio_file', document.getElementById('audioFileInput').files[0]);
+        fetch('/', {method: 'POST', body: formData, headers: {'X-CSRFToken': getCookie('csrftoken')}})
+        .then(response => response.text())
+        .then(html => {
+            document.body.innerHTML = html;
+            initializeWaveSurfer();
+            initializeEventListeners();
+        })
+        .catch(error => console.error('Error submitting form:', error));
     }
+
     // Function to get the CSRF token
     function getCookie(name) {
         var cookieValue = null;
@@ -89,8 +93,71 @@
         }
         return cookieValue;
     }
+
+    // Function to clear predictions and reset WaveSurfer
+    function clearFormAndPredictions() {
+        // Clear predictions div
+        var predictionsDiv = document.getElementById('predictions');
+        if (predictionsDiv) {
+            predictionsDiv.innerHTML = '';
+            predictionsDiv.classList.add('hidden');
+        }
+        
+        // Reset WaveSurfer
+        if (window.wavesurfer) {
+            wavesurfer.empty();
+            document.getElementById('playButton').disabled = true;
+        }
+    }
+
+
+    function initializeWaveSurfer() {
+      window.wavesurfer = WaveSurfer.create({
+          container: '#waveform',
+          waveColor: 'gray',
+          progressColor: '#f9f1f1'
+      });
+
+      wavesurfer.on('play', function () {
+          document.getElementById('playButton').textContent = 'Stop';
+      });
+
+      wavesurfer.on('pause', function () {
+          document.getElementById('playButton').textContent = 'Play';
+      });
+
+      document.getElementById('playButton').addEventListener('click', function () {
+          wavesurfer.playPause();
+          this.textContent = wavesurfer.isPlaying() ? 'Stop' : 'Play';
+      });
+  }
+
+    document.addEventListener('DOMContentLoaded', function() {
+    initializeWaveSurfer();
+    initializeEventListeners();
+  });
+
+    function initializeEventListeners() {
+        document.getElementById('runAlgorithmButton').addEventListener('click', function(event) {
+            // Block the native form submission so only the fetch in submitForm() runs
+            event.preventDefault();
+            if (document.getElementById('audioFileInput').files.length > 0) {
+                submitForm();
+            } else { alert('Please select a file to upload.'); }
+        });
+
+        document.getElementById('audioFileInput').addEventListener('change', function(event) {
+            loadAudioFile(event);
+        });
+
+        // Function to reset the form and clear the predictions
+        document.getElementById('uploadForm').addEventListener('reset', function() {
+            clearFormAndPredictions();
+        });
+    }
+
+
   </script>
-</section>
 
 
 {% endblock content %}
diff --git a/myproject/myapp/urls.py b/myproject/myapp/urls.py
index 0c58dc04c12d2aaffa94093d33289320e0918324..304d04754e37a9ff8a612df67e74b5cda123ecac 100644
--- a/myproject/myapp/urls.py
+++ b/myproject/myapp/urls.py
@@ -1,10 +1,11 @@
 from django.urls import path
-from .views import index, users, maintenance, handler404, handler500, register, user_login, terms_conditions, privacy_policy, handling_music_file, pricing, generate_pdf, admin_table
+
+from .views import InstrumentDetectionView, index, users, maintenance, handler404, handler500, register, user_login, terms_conditions, privacy_policy, handling_music_file, pricing, generate_pdf, admin_table
 from django.contrib.auth import views as auth_views
 
 urlpatterns = [
     # path('', index, name='index'), <- uncomment when index/main page will be ready
-    path('', index),
+    path('', index, name='index'),
     path('user/', users, name='users'),
     path('404/', handler404),
     path('500/', handler500),
@@ -19,6 +20,7 @@ urlpatterns = [
     path('pricing/', pricing, name='pricing'),
     path('generate_pdf/', generate_pdf, name='generate_pdf'),
     path('admin_table/', admin_table, name='admin_table'),
+    path('instrument_detection/', InstrumentDetectionView.as_view(), name='instrument_detection'),
     path('password_change/', auth_views.PasswordChangeView.as_view(template_name='password_change_form.html'), name='password_change'),
-    path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(template_name='password_change_done.html'), name='password_change_done'),
+    path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(template_name='password_change_done.html'), name='password_change_done')
 ]
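
Note: with the route registered, the endpoint can be exercised independently of the HTML form. A hedged example of calling it from a script, assuming a local dev server on port 8000, a sample.wav test file, and the default DRF settings (no extra authentication on the endpoint):

```python
# Example client for the new /instrument_detection/ endpoint; the host, port
# and file name are assumptions for local testing.
import requests

with open('sample.wav', 'rb') as audio:
    response = requests.post(
        'http://localhost:8000/instrument_detection/',
        files={'audio_file': audio},
    )

response.raise_for_status()
print(response.json()['predictions'])
```
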
diff --git a/myproject/myapp/views.py b/myproject/myapp/views.py
index 555691f83d60c983dd9670bd3cd2978045a89f6a..b90b29a1e030d1559ec66b614a9ebbb4cbe8ae02 100644
--- a/myproject/myapp/views.py
+++ b/myproject/myapp/views.py
@@ -16,6 +16,14 @@ from .models import Log, Action, User
 from django.http import JsonResponse
 from django.db import connection
 
+# Django Rest Framework imports
+from rest_framework.views import APIView
+from rest_framework.response import Response
+from rest_framework import status
+from .serializers import InstrumentDetectionSerializer
+from .audio_preprocessing import preprocess_audio_for_inference
+import json  # needed for json.dumps in InstrumentDetectionView (harmless if already imported above)
+import requests
 
 logger = logging.getLogger(__name__)
 
@@ -89,19 +96,42 @@ def user_table(request):
     return JsonResponse({'data': data}, safe=False)
 
 def index(request):
-    #for now this authenication just returns the main view
-    #when user auth is done change the else to return index2.html
+    # Initialize default context
+    context = {'form': InstrumentDetectionForm(), 
+               'predictions': [],
+               'file_name': None
+               }
+    
+    # Handle authenticated users
     if request.user.is_authenticated:
         if request.method == 'POST':
-            if request.FILES['audio_file'] != None:
-                uploaded_file = request.FILES['audio_file']
-                # Do something with the uploaded file
-            return render(request, 'index1.html')
-        else:
-            return render(request, 'index1.html')
+            # Placeholder: handle the uploaded file for authenticated users once that flow exists
+            uploaded_file = request.FILES.get('audio_file')
+            if uploaded_file:
+                # Process the uploaded file as needed
+                pass
+            # For now the page is simply re-rendered after the upload is received
+        # GET requests, and POSTs without a file, just render the main page
+        return render(request, 'index1.html')
+
+    # Handle unauthenticated users
     else:
-        audio_form = InstrumentDetectionForm()
-        return render(request, 'index1.html', {'form': audio_form})
+        if request.method == 'POST':
+            form = InstrumentDetectionForm(request.POST, request.FILES)
+            if form.is_valid() and 'audio_file' in request.FILES:
+                uploaded_file = request.FILES['audio_file']
+                context['file_name'] = uploaded_file.name
+                # Forward the request to InstrumentDetectionView to get the predictions
+                view = InstrumentDetectionView.as_view()  # as_view() must be called on the class, not an instance
+                response = view(request)
+                # Ensure there's a response and it contains predictions before updating context
+                if response and hasattr(response, 'data') and 'predictions' in response.data:
+                    context['predictions'] = response.data['predictions']
+            else:
+                context['form'] = form
+        # For GET requests or if form is not valid, render the page with the default or updated context
+        return render(request, 'index1.html', context)
+
 
 def users(request):
     # Make a request to the admin_table view to get the data
@@ -188,3 +218,55 @@ def generate_pdf(request):
     p.save()
 
     return response
+
+# Running the audio file through the model
+class InstrumentDetectionView(APIView):
+    def post(self, request):
+        serializer = InstrumentDetectionSerializer(data=request.data)
+        if serializer.is_valid():
+            audio_file = serializer.validated_data['audio_file']
+            
+            # Save the uploaded file temporarily
+            with open('temp_audio.wav', 'wb') as f:
+                f.write(audio_file.read())
+            
+            # Preprocess the audio file
+            preprocessed_data = preprocess_audio_for_inference('temp_audio.wav')
+            
+            # Prepare data for TensorFlow Serving
+            data = json.dumps({"signature_name": "serving_default", "instances": [window.tolist() for window in preprocessed_data]})
+            
+            # Send request to TensorFlow Serving
+            url = 'http://tensorflow_serving:8501/v1/models/instrument_model/versions/2:predict'
+            headers = {"Content-Type": "application/json"}
+            response = requests.post(url, data=data, headers=headers)
+            
+            # Process the response
+            if response.status_code == 200:
+                raw_predictions = response.json()['predictions']
+                # Format the raw prediction scores for display (one entry per window)
+                formatted_predictions = self.format_predictions(raw_predictions)
+                return Response({"predictions": formatted_predictions}, status=status.HTTP_200_OK)
+            else:
+                return Response({"error": "Failed to get predictions"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+        
+        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+    
+    def convert_to_percentages(self, predictions):
+        # Assuming predictions is a list of lists
+        percentage_predictions = []
+        for prediction in predictions:
+            total = sum(prediction)
+            # Convert each number to a percentage of the total, rounded to 2 decimal places
+            percentages = [round((number / total) * 100, 2) for number in prediction]
+            percentage_predictions.append(percentages)
+        return percentage_predictions
+    
+    def format_predictions(self, predictions):
+        instruments = ['Guitar', 'Drum', 'Violin', 'Piano']
+        formatted_predictions = []
+        for window_index, prediction in enumerate(predictions, start=1):
+            formatted_window = f"<strong>Window {window_index}</strong><br>"
+            formatted_scores = "<br>".join([f"{instruments[i]} - {score:.2f}" for i, score in enumerate(prediction)])
+            formatted_predictions.append(f"{formatted_window}{formatted_scores}")
+        return formatted_predictions
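
Note: writing every upload to a fixed temp_audio.wav in the working directory means concurrent requests overwrite each other and the file lingers afterwards (one copy even ended up committed at the end of this diff). If that becomes a problem, the save/preprocess step could switch to a per-request temporary file; a minimal sketch, keeping the existing preprocess_audio_for_inference call:

```python
# Sketch: preprocess the upload via a throwaway temporary file instead of a
# shared temp_audio.wav in the working directory.
import os
import tempfile

def preprocess_uploaded_audio(audio_file):
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
        tmp.write(audio_file.read())
        tmp_path = tmp.name
    try:
        return preprocess_audio_for_inference(tmp_path)
    finally:
        os.remove(tmp_path)
```

InstrumentDetectionView.post could then call preprocess_uploaded_audio(audio_file) in place of the fixed-path write.
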
diff --git a/myproject/myproject/settings.py b/myproject/myproject/settings.py
index 1725ba2e02c1db575915dd7a88a048763feab2c3..bb27f10d6d8490f0b0f752ea6cecc14b008b24b3 100644
--- a/myproject/myproject/settings.py
+++ b/myproject/myproject/settings.py
@@ -40,6 +40,7 @@ INSTALLED_APPS = [
     'django.contrib.staticfiles',
     'myapp',
     'compressor',
+    'rest_framework',
 ]
 
 STATICFILES_DIRS = [
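
Note: rest_framework works with its defaults once added to INSTALLED_APPS, so no REST_FRAMEWORK block is required here. If the API needs explicit permissions or parsers later, settings.py could gain something like the sketch below; the specific choices are assumptions, not part of this change.

```python
# Possible DRF configuration for settings.py (not included in this change);
# the permission and parser choices here are assumptions.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
    'DEFAULT_PARSER_CLASSES': [
        'rest_framework.parsers.MultiPartParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.JSONParser',
    ],
}
```
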
diff --git a/myproject/requirements.txt b/myproject/requirements.txt
index 72f9567a4738fc3db60a6bd1af8518f23d957002..287a93c4c764530b81983644f4a67744f38e5009 100644
--- a/myproject/requirements.txt
+++ b/myproject/requirements.txt
@@ -2,6 +2,7 @@ asgiref==3.7.2
 Django==5.0.1
 django-appconf==1.0.6
 django-compressor==4.4
+djangorestframework==3.15.0
 mysqlclient==2.2.3
 rcssmin==1.1.1
 rjsmin==1.2.1
@@ -33,4 +34,7 @@ soxr==0.3.7
 threadpoolctl==3.3.0
 typing_extensions==4.10.0
 urllib3==2.2.1
-reportlab==4.1.0
\ No newline at end of file
+reportlab==4.1.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+pillow==10.2.0
\ No newline at end of file
diff --git a/myproject/temp_audio.wav b/myproject/temp_audio.wav
new file mode 100644
index 0000000000000000000000000000000000000000..b47a440af0d73187525a2566509da3183762a8e5
Binary files /dev/null and b/myproject/temp_audio.wav differ