# Smart Parking System - Hugging Face Spaces Version
# Run this in Hugging Face Spaces ("!pip" lines assume a notebook-style runtime;
# in a classic Space, list these packages in requirements.txt instead)

!pip install -q torch torchvision transformers pillow matplotlib numpy opencv-python-headless timm einops gradio
!pip install -q git+https://github.com/facebookresearch/segment-anything-2.git
!pip install -q ultralytics supervision

import torch
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from datetime import datetime, timedelta
import cv2
from transformers import AutoProcessor, AutoModelForCausalLM
import gradio as gr
import warnings

warnings.filterwarnings('ignore')


class StateOfTheArtParkingDetector:
    def __init__(self, total_spaces=50):
        self.total_spaces = total_spaces
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"🚀 Loading Models... Device: {self.device}")

        # Try Florence-2 (optional, can skip if CPU)
        self.florence_available = False
        try:
            self.processor = AutoProcessor.from_pretrained(
                "microsoft/Florence-2-base", trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                "microsoft/Florence-2-base",
                torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                trust_remote_code=True,
            ).to(self.device)
            self.florence_available = True
            print("✅ Florence-2 loaded")
        except Exception:
            print("⚠️ Florence-2 unavailable, skipping")

        # YOLO-World (open-vocabulary detector)
        try:
            from ultralytics import YOLO
            self.yolo_world = YOLO('yolov8x-worldv2.pt')
            self.yolo_world_available = True
            print("✅ YOLO-World loaded")
        except Exception as e:
            print(f"⚠️ YOLO-World unavailable: {e}")
            self.yolo_world_available = False

    # Florence detection
    def detect_with_florence(self, image):
        if not self.florence_available:
            return []
        # Minimal placeholder to avoid crash (see detect_with_florence_od below
        # for a sketch of a real call)
        return []

    # YOLO-World detection
    def detect_with_yolo_world(self, image):
        if not self.yolo_world_available:
            return []
        # Open-vocabulary prompts: YOLO-World detects whatever classes we name here
        self.yolo_world.set_classes([
            "car", "vehicle", "automobile", "truck", "van", "SUV",
            "sedan", "parked car", "parking space with car"
        ])
        img_array = np.array(image)
        results = self.yolo_world(img_array, conf=0.15, iou=0.3, verbose=False)
        detections = []
        for result in results:
            for box in result.boxes:
                x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                conf = float(box.conf[0])
                detections.append({
                    'box': [int(x1), int(y1), int(x2), int(y2)],
                    'label': 'car', 'score': conf, 'method': 'yolo-world'
                })
        return detections

    # Simple CV detection fallback (edge/contour heuristics, no ML)
    def advanced_cv_detection(self, image):
        img_array = np.array(image)
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 30, 100)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel, iterations=2)
        contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        h, w = gray.shape
        min_area, max_area = (h * w) // 600, (h * w) // 20
        detections = []
        for contour in contours:
            area = cv2.contourArea(contour)
            if min_area < area < max_area:
                x, y, cw, ch = cv2.boundingRect(contour)
                aspect_ratio = cw / ch if ch > 0 else 0
                # Car-like aspect-ratio window, then a solidity check to reject
                # stringy contours (lane markings, shadows)
                if 0.4 < aspect_ratio < 4.0:
                    hull = cv2.convexHull(contour)
                    hull_area = cv2.contourArea(hull)
                    solidity = area / hull_area if hull_area > 0 else 0
                    if solidity > 0.4:
                        detections.append({
                            'box': [x, y, x + cw, y + ch],
                            'label': 'car', 'score': 0.7, 'method': 'cv'
                        })
        return detections

    # Merge detections from all sources (greedy NMS)
    def merge_all_detections(self, all_detections):
        if len(all_detections) == 0:
            return []
        normalized = []
        for det in all_detections:
            x1, y1, x2, y2 = det['box']
            normalized.append({
                'box': [x1, y1, x2, y2],
                'score': det['score'],
                'method': det.get('method', 'unknown')
            })
        # Highest-confidence boxes win; overlapping lower-scored boxes are dropped
        normalized.sort(key=lambda x: x['score'], reverse=True)
        keep = []
        while normalized:
            best = normalized.pop(0)
            keep.append(best)
            normalized = [d for d in normalized
                          if self.calculate_iou(best['box'], d['box']) < 0.5]
        return keep
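    # Hedged sketch of an actual Florence-2 detection call, not wired into
    # process_image: the "<OD>" task prompt, generate/decode loop, and
    # post_process_generation follow the usage documented on the
    # microsoft/Florence-2-base model card. The method name, the label filter,
    # and the 0.8 fallback score are illustrative choices, not the original code.
    def detect_with_florence_od(self, image):
        if not self.florence_available:
            return []
        task = "<OD>"
        inputs = self.processor(text=task, images=image, return_tensors="pt").to(
            self.device, self.model.dtype
        )
        generated_ids = self.model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=1024,
            num_beams=3,
        )
        raw = self.processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
        parsed = self.processor.post_process_generation(
            raw, task=task, image_size=(image.width, image.height)
        )
        detections = []
        for bbox, label in zip(parsed[task]["bboxes"], parsed[task]["labels"]):
            # Keep only vehicle-like labels; Florence-2 returns boxes without scores
            if any(k in label.lower() for k in ("car", "truck", "van", "bus", "vehicle")):
                x1, y1, x2, y2 = map(int, bbox)
                detections.append({'box': [x1, y1, x2, y2], 'label': 'car',
                                   'score': 0.8, 'method': 'florence'})
        return detections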
    def calculate_iou(self, box1, box2):
        x1_1, y1_1, x2_1, y2_1 = box1
        x1_2, y1_2, x2_2, y2_2 = box2
        # Intersection rectangle
        x1_i, y1_i = max(x1_1, x1_2), max(y1_1, y1_2)
        x2_i, y2_i = min(x2_1, x2_2), min(y2_1, y2_2)
        if x2_i < x1_i or y2_i < y1_i:
            return 0
        intersection = (x2_i - x1_i) * (y2_i - y1_i)
        area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
        area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
        union = area1 + area2 - intersection
        return intersection / union if union > 0 else 0

    def create_annotated_image(self, image, detections):
        img_array = np.array(image)
        fig, ax = plt.subplots(1, figsize=(12, 8))
        ax.imshow(img_array)
        for i, det in enumerate(detections, 1):
            x1, y1, x2, y2 = det['box']
            # Rectangle takes an (x, y) anchor plus separate width/height arguments
            rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                     linewidth=3, edgecolor='#00ff00', facecolor='none')
            ax.add_patch(rect)
            ax.text(x1 + (x2 - x1) / 2, y1 + (y2 - y1) / 2, f"{i}",
                    bbox=dict(facecolor='#00ff00', alpha=0.9, boxstyle='circle,pad=0.3'),
                    fontsize=12, color='black', weight='bold', ha='center', va='center')
        ax.axis('off')
        plt.tight_layout()
        fig.canvas.draw()
        # buffer_rgba() works on current Matplotlib, where tostring_rgb() was removed
        img_plot = np.asarray(fig.canvas.buffer_rgba())[:, :, :3].copy()
        plt.close(fig)
        return Image.fromarray(img_plot)
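    # Worked example for the NMS threshold above: boxes [0, 0, 10, 10] and
    # [5, 5, 15, 15] intersect in a 5x5 patch, so IoU = 25 / (100 + 100 - 25)
    # ≈ 0.14. That is below 0.5, so merge_all_detections keeps both boxes as
    # separate vehicles; only near-duplicates (IoU >= 0.5) are suppressed.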

    # --- Main function for Gradio ---
    def process_image(self, image, total_spaces):
        if image is None:
            return None, "Upload an image", ""
        self.total_spaces = total_spaces
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)

        # Run every available detector and fuse the results
        all_dets = []
        all_dets.extend(self.detect_with_florence(image))
        all_dets.extend(self.detect_with_yolo_world(image))
        all_dets.extend(self.advanced_cv_detection(image))
        final_dets = self.merge_all_detections(all_dets)
        count = len(final_dets)
        annotated_img = self.create_annotated_image(image, final_dets)

        occupancy_rate = min((count / total_spaces) * 100, 100)
        available = max(total_spaces - count, 0)
        status = ("🟢 AVAILABLE" if occupancy_rate < 50 else
                  "🟡 BUSY" if occupancy_rate < 70 else
                  "🟠 CRITICAL" if occupancy_rate < 90 else
                  "🔴 FULL")
        # Naive forecast: at roughly 5 arrivals per hour, the remaining
        # spaces fill in available/5 hours
        pred_time = (datetime.now() + timedelta(hours=available / 5)).strftime('%I:%M %p')

        # Minimal HTML status card for the gr.HTML output
        status_html = f"""
        <div style="text-align:center; padding:20px; border-radius:10px; background:#222; color:white;">
            <h2>{status}</h2>
            <p>{available} of {total_spaces} spaces free · predicted full around {pred_time}</p>
        </div>
        """
        stats_text = f"Total Vehicles: {count}\nAvailable: {available}\nOccupancy: {occupancy_rate:.1f}%"
        return annotated_img, status_html, stats_text


# --- Initialize Detector ---
detector = StateOfTheArtParkingDetector()

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("## 🅿️ Smart Parking Management System")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Parking Lot Image", type="pil")
            total_spaces_slider = gr.Slider(10, 200, value=50, step=5, label="Total Spaces")
            analyze_btn = gr.Button("Analyze Parking Lot")
        with gr.Column():
            output_image = gr.Image(label="Detected Vehicles", type="pil")
            status_html = gr.HTML(label="Parking Status")
            stats_output = gr.Textbox(label="Detailed Stats", lines=10)
    analyze_btn.click(fn=detector.process_image,
                      inputs=[input_image, total_spaces_slider],
                      outputs=[output_image, status_html, stats_output])

demo.launch()