Skip to content
Snippets Groups Projects
Commit b7dcbc6e authored by a2-imeri's avatar a2-imeri
Browse files

Change code structure

parent 2c7bfe9e
No related branches found
No related tags found
No related merge requests found
# MisplaceAI/process_misplaced_manager/utils.py
from django.utils import timezone
from .models import DailyDetectionLimit, DetectionLimitSetting
def check_daily_limit(user, detection_type):
    """
    Decide whether *user* may run one more detection of *detection_type* today.

    Args:
        user: the Django user whose daily quota is being checked.
        detection_type: 'image' or 'video'.

    Returns:
        A ``(allowed, remaining)`` tuple. ``allowed`` is False when the daily
        quota for the given type is already exhausted, True otherwise.
        ``remaining`` is the configured limit minus the current count for the
        requested type (video figures are reported for any type other than
        'image', mirroring the original fall-through).
    """
    # Lazily create a default settings row if the admin never configured one.
    settings_row = DetectionLimitSetting.objects.first()
    if settings_row is None:
        settings_row = DetectionLimitSetting.objects.create(
            daily_image_limit=10, daily_video_limit=5
        )

    usage, _ = DailyDetectionLimit.objects.get_or_create(user=user)
    usage.reset_limits()  # Zero the counters if the day has rolled over.

    if detection_type == 'image':
        remaining_images = settings_row.daily_image_limit - usage.image_detection_count
        if remaining_images <= 0:
            return False, remaining_images
    elif detection_type == 'video':
        remaining_videos = settings_row.daily_video_limit - usage.video_detection_count
        if remaining_videos <= 0:
            return False, remaining_videos

    # Quota not exhausted (or unrecognized type): report the remaining
    # allowance — image figures for 'image', video figures otherwise.
    if detection_type == 'image':
        return True, settings_row.daily_image_limit - usage.image_detection_count
    return True, settings_row.daily_video_limit - usage.video_detection_count
from .models import DailyDetectionLimit
from PIL import Image, ExifTags
import cv2
from moviepy.editor import ImageSequenceClip
from item_detector.utils import run_inference
from placement_rules.utils import PlacementRules
from results_viewer.utils import visualize_pil_misplaced_objects
from django.conf import settings
import numpy as np
import os
def increment_detection_count(user, detection_type):
detection_limit = DailyDetectionLimit.objects.get(user=user)
......@@ -26,3 +19,70 @@ def increment_detection_count(user, detection_type):
elif detection_type == 'video':
detection_limit.video_detection_count += 1
detection_limit.save()
def correct_image_orientation(image_path):
    """
    Rotate the image at *image_path* in place so it displays upright.

    Reads the EXIF 'Orientation' tag (0x0112) and undoes the three
    rotation-only orientations (3, 6, 8); mirrored orientations
    (2, 4, 5, 7) are left untouched, as in the original implementation.
    The file is rewritten at the same path.

    Args:
        image_path: path to an image file readable by Pillow.
    """
    # 0x0112 is the standard EXIF 'Orientation' tag id — replaces the
    # original ExifTags.TAGS scan loop, which always resolved to this value.
    ORIENTATION_TAG = 0x0112
    try:
        # Context manager ensures the underlying file handle is closed;
        # the previous version leaked it on every call.
        with Image.open(image_path) as image:
            # NOTE(review): _getexif() is a private Pillow API (returns None
            # when there is no EXIF block); the public Image.getexif() is
            # preferred but returns a different type — kept to avoid
            # changing behavior.
            exif = image._getexif()
            orientation = exif.get(ORIENTATION_TAG) if exif is not None else None
            if orientation == 3:
                image = image.rotate(180, expand=True)
            elif orientation == 6:
                image = image.rotate(270, expand=True)
            elif orientation == 8:
                image = image.rotate(90, expand=True)
            # Rewrite the file (unconditionally, as before) so the stored
            # pixels are upright.
            image.save(image_path)
    except (AttributeError, KeyError, IndexError):
        # Best effort: malformed or missing EXIF leaves the file untouched.
        pass
def process_video_for_misplaced_objects(video_path, frame_interval, frame_delay, detection_model, category_index):
    """
    Run misplaced-object detection on sampled frames of a video.

    Every ``frame_interval`` seconds a frame is pulled from the video, run
    through ``detection_model``, checked against the placement rules,
    annotated, and collected into a new annotated video written under
    ``MEDIA_ROOT/videos``.

    Args:
        video_path: path to the input video file (expected ``.mp4``).
        frame_interval: seconds between sampled frames.
        frame_delay: seconds each annotated frame is shown in the output
            clip (the clip is assembled at ``1/frame_delay`` fps).
        detection_model: model object forwarded to ``run_inference``.
        category_index: label map forwarded to ``run_inference``.

    Returns:
        ``(detected_objects_all_frames, misplaced_objects_all_frames,
        output_video_path)`` — per-sampled-frame results plus the path of
        the annotated video.
    """
    cap = cv2.VideoCapture(video_path)
    misplaced_objects_all_frames = []
    detected_objects_all_frames = []
    annotated_frames = []
    frame_count = 0
    annotated_frame_count = 1  # 1-based label drawn onto each annotated frame

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        # Some containers report 0 fps; fall back to a sane default instead
        # of crashing with a modulo-by-zero below and a 0-fps output video.
        fps = 30
    # Number of raw frames between two sampled frames; at least 1.
    frame_interval_frames = max(1, frame_interval * fps)

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            if frame_count % frame_interval_frames == 0:
                # OpenCV delivers BGR; convert to RGB for PIL/inference.
                image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image_pil = Image.fromarray(image_np)
                detected_objects = run_inference(detection_model, category_index, image_pil)
                placement_rules = PlacementRules()
                misplaced_objects = placement_rules.check_placement(detected_objects)
                detected_objects_all_frames.append(detected_objects)
                misplaced_objects_all_frames.append(misplaced_objects)
                # Annotate the frame with bounding boxes, labels, and the
                # annotated-frame number.
                annotated_image_pil = visualize_pil_misplaced_objects(
                    image_pil, detected_objects, misplaced_objects, annotated_frame_count
                )
                annotated_frames.append(np.array(annotated_image_pil))
                annotated_frame_count += 1
            frame_count += 1
    finally:
        # Always free the capture handle, even if inference/annotation raises
        # (the previous version leaked it on any exception).
        cap.release()

    output_video_path = os.path.join(
        settings.MEDIA_ROOT,
        'videos',
        os.path.basename(video_path).replace('.mp4', '_annotated.mp4'),
    )
    # Make sure the destination directory exists before moviepy writes to it.
    os.makedirs(os.path.dirname(output_video_path), exist_ok=True)
    # Build the output clip with the requested per-frame display delay.
    annotated_clip = ImageSequenceClip(annotated_frames, fps=1 / frame_delay)
    annotated_clip.write_videofile(output_video_path, fps=fps, codec='libx264', audio_codec='aac')
    return detected_objects_all_frames, misplaced_objects_all_frames, output_video_path
......@@ -13,15 +13,10 @@ from results_viewer.utils import visualize_misplaced_objects, visualize_pil_misp
from django.core.files.base import ContentFile
import base64
import os
from PIL import Image, ExifTags
import logging
import cv2
from django.conf import settings
from django.http import JsonResponse, HttpResponse, Http404
import numpy as np
from moviepy.editor import VideoFileClip, ImageSequenceClip
from .utils import increment_detection_count
from .utils import increment_detection_count, correct_image_orientation, process_video_for_misplaced_objects
logger = logging.getLogger(__name__)
......@@ -36,9 +31,6 @@ class UploadedImageViewSet(viewsets.ModelViewSet):
serializer_class = UploadedImageSerializer
permission_classes = [IsAuthenticated]
# MisplaceAI/process_misplaced_manager/views.py
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def normal_detection(request):
......@@ -86,26 +78,6 @@ def normal_detection(request):
print(f"Error processing image: {str(e)}")
return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def correct_image_orientation(image_path):
    """
    Rotate the image at *image_path* in place so it displays upright,
    based on its EXIF 'Orientation' tag.

    NOTE(review): this is the diff-deleted views.py copy — the commit moves
    an identical helper into utils.py.
    """
    try:
        image = Image.open(image_path)
        # Scan ExifTags.TAGS for the numeric tag id named 'Orientation';
        # the loop variable keeps that id after `break`.
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        # _getexif() is a private Pillow API; returns None when the file
        # carries no EXIF block.
        exif = image._getexif()
        if exif is not None:
            orientation = exif.get(orientation)
            # Undo the three rotation-only orientations; mirrored variants
            # (2, 4, 5, 7) are not handled here.
            if orientation == 3:
                image = image.rotate(180, expand=True)
            elif orientation == 6:
                image = image.rotate(270, expand=True)
            elif orientation == 8:
                image = image.rotate(90, expand=True)
        # Rewrite the file at the same path (the open file handle is never
        # explicitly closed — presumably relied on GC).
        image.save(image_path)
    except (AttributeError, KeyError, IndexError):
        # Best effort: malformed or missing EXIF leaves the file untouched.
        pass
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def display_results(request, image_id):
......@@ -184,7 +156,9 @@ def display_video_results(request, video_id):
print("Processing video at path:", video_path)
print(f"Frame interval: {frame_interval}, Frame delay: {frame_delay}")
detected_objects, misplaced_objects, output_video_path = process_video_for_misplaced_objects(video_path, frame_interval, frame_delay)
detected_objects, misplaced_objects, output_video_path = process_video_for_misplaced_objects(
video_path, frame_interval, frame_delay, detection_model, category_index
)
# Delete the original uploaded video
if os.path.exists(video_path):
......@@ -204,67 +178,6 @@ def display_video_results(request, video_id):
print(f"Error processing video results: {e}")
return JsonResponse({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def process_video_for_misplaced_objects(video_path, frame_interval, frame_delay):
    """
    Diff-deleted (pre-commit) version of the video pipeline: samples a frame
    every ``frame_interval`` seconds, runs detection and placement rules on
    it, annotates it, and assembles the annotated frames into a new video.

    Returns ``(detected_objects_all_frames, misplaced_objects_all_frames,
    output_video_path)``.

    BUG(review): ``detection_model`` and ``category_index`` are used below
    but are neither parameters nor visible module globals — this is why the
    commit replaces this version with the utils.py one that takes them as
    arguments.
    """
    cap = cv2.VideoCapture(video_path)
    # frame_width/frame_height are computed but never used below.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    misplaced_objects_all_frames = []
    detected_objects_all_frames = []
    frame_count = 0
    annotated_frame_count = 1  # Start frame count from 1 for annotated frames
    # Raw frames between two sampled frames; 0 if the container reports
    # fps as 0, which would make the modulo below raise ZeroDivisionError.
    frame_interval_frames = frame_interval * fps
    annotated_frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame_count % frame_interval_frames == 0:
            # OpenCV delivers BGR; convert to RGB for PIL/inference.
            image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Convert the frame to PIL image
            image_pil = Image.fromarray(image_np)
            detected_objects = run_inference(detection_model, category_index, image_pil)
            placement_rules = PlacementRules()
            misplaced_objects = placement_rules.check_placement(detected_objects)
            detected_objects_all_frames.append(detected_objects)
            misplaced_objects_all_frames.append(misplaced_objects)
            # Annotate the frame with bounding boxes, labels, and annotated frame number
            annotated_image_pil = visualize_pil_misplaced_objects(image_pil, detected_objects, misplaced_objects, annotated_frame_count)
            annotated_image_np = np.array(annotated_image_pil)
            annotated_frames.append(annotated_image_np)
            # Increment the annotated frame count
            annotated_frame_count += 1
        frame_count += 1
    cap.release()
    # Create a video with a specified delay between each frame
    output_video_path = os.path.join(settings.MEDIA_ROOT, 'videos', os.path.basename(video_path).replace('.mp4', '_annotated.mp4'))
    annotated_clip = ImageSequenceClip(annotated_frames, fps=1/frame_delay)
    annotated_clip.write_videofile(output_video_path, fps=fps, codec='libx264', audio_codec='aac')
    return detected_objects_all_frames, misplaced_objects_all_frames, output_video_path
#################################################################################################
################################## Delete Video/Images ##########################################
@api_view(['DELETE'])
@permission_classes([IsAuthenticated])
def delete_image(request, image_name):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment