diff --git a/.gitignore b/.gitignore
index e68b1648dc30520b390ea14368e1c8ace12a0a0c..f48ddc72ffb73c6af1e3f1b717610b9c02a966bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@ myproject/node_modules
 myproject/env
 myproject/myapp/static/CACHE
 myproject/myapp/__pycache__
-.venv/
\ No newline at end of file
+.venv/
+.DS_Store
\ No newline at end of file
diff --git a/myproject/debug.log b/myproject/debug.log
index d9213e388ba1ab628dc1cd413c7ac12c274bfc80..17a31833391facd425e793bc3730b4ca8a455a2c 100644
--- a/myproject/debug.log
+++ b/myproject/debug.log
@@ -400,3 +400,4 @@ Watching for file changes with StatReloader
 Watching for file changes with StatReloader
 /usr/src/app/myapp/views.py changed, reloading.
 Watching for file changes with StatReloader
+Watching for file changes with StatReloader
diff --git a/myproject/myapp/audio_preprocessing.py b/myproject/myapp/audio_preprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..24728a942b3fc959c449a761c96d18efd99fea3c
--- /dev/null
+++ b/myproject/myapp/audio_preprocessing.py
@@ -0,0 +1,52 @@
+import librosa
+import numpy as np
+import requests
+import json
+
def get_windows(audio, window_size=22050, hop_size=None):
    """Split *audio* into fixed-length overlapping windows.

    Parameters
    ----------
    audio : sequence of samples (1-D numpy array or list).
    window_size : int
        Length of each window in samples (default 22050 — one second at
        the 22.05 kHz rate used by ``preprocess_audio_for_inference``).
    hop_size : int or None
        Step between consecutive window starts. Defaults to
        ``window_size // 2`` (50% overlap), matching the previous
        hard-coded behavior.

    Returns
    -------
    list
        The windows in order. A trailing partial window shorter than
        ``window_size`` is dropped.
    """
    if hop_size is None:
        hop_size = window_size // 2  # 50% overlap by default
    windows = []
    audio_len = len(audio)
    start = 0
    # Equivalent to the original "break when the window would overrun":
    # only full-length windows are emitted.
    while start + window_size <= audio_len:
        windows.append(audio[start:start + window_size])
        start += hop_size
    return windows
+
def preprocess_audio_for_inference(audio_path):
    """Load an audio file and turn it into model-ready spectrogram windows.

    The file is resampled to 22.05 kHz, split into overlapping windows via
    ``get_windows``, and each window becomes a dB-scaled mel spectrogram of
    shape (128, 44, 1) — the trailing axis is the channel dimension the
    model expects.
    """
    audio, sr = librosa.load(audio_path, sr=22050)
    features = []
    for chunk in get_windows(audio):
        spectrogram = librosa.feature.melspectrogram(y=chunk, sr=sr)
        spectrogram_db = librosa.power_to_db(spectrogram, ref=np.max)
        # Force the model's expected (128, 44) input shape; np.resize
        # truncates or cyclically repeats values as needed.
        fixed = np.resize(spectrogram_db, (128, 44))
        # Append the channel axis -> (128, 44, 1).
        features.append(fixed[..., np.newaxis])
    return features
+
def main():
    """Preprocess a local audio file and request predictions from TensorFlow Serving."""
    # Preprocess your audio file
    audio_path = './static/src/media/Casio Piano C5 1980s.wav'  # Update this path
    preprocessed_data = preprocess_audio_for_inference(audio_path)

    # TensorFlow Serving URL
    url = 'http://localhost:8501/v1/models/instrument_model:predict'

    # Prepare data for TensorFlow Serving: one instance per window
    data = json.dumps({"signature_name": "serving_default", "instances": [window.tolist() for window in preprocessed_data]})

    # Send request; timeout so a dead/unreachable server cannot hang the script forever
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, data=data, headers=headers, timeout=60)

    # Process response
    if response.status_code == 200:
        predictions = response.json()['predictions']
        # Process your predictions as needed
        print(predictions)
    else:
        print(f"Failed to get predictions, status code: {response.status_code}, response text: {response.text}")


# Guard the entry point so importing this module no longer triggers
# audio loading and a network round-trip as a side effect.
if __name__ == '__main__':
    main()
+