+
SentryBOT Camera Stream
+

+
Resolution: {self.server.resolution[0]}x{self.server.resolution[1]} - FPS: {self.server.fps}
+
Server IP: {self.server.server_ip}
+
+
+
+ """
+ content_bytes = content.encode('utf-8')
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/html')
+ self.send_header('Content-Length', len(content_bytes))
+ self.end_headers()
+ self.wfile.write(content_bytes)
+ elif self.path == '/stream.mjpg':
+ self.send_response(200)
+ self.send_header('Age', 0)
+ self.send_header('Cache-Control', 'no-cache, private')
+ self.send_header('Pragma', 'no-cache')
+ self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
+ self.end_headers()
+ try:
+ while True:
+ with self.server.output.condition:
+ self.server.output.condition.wait()
+ frame = self.server.output.frame
+ self.wfile.write(b'--FRAME\r\n')
+ self.send_header('Content-Type', 'image/jpeg')
+ self.send_header('Content-Length', len(frame))
+ self.end_headers()
+ self.wfile.write(frame)
+ self.wfile.write(b'\r\n')
+ except Exception as e:
+ if self.server.verbose_logging:
+ pub.sendMessage('log', msg=f"[CameraStreamer] Streaming error: {e}")
+ else:
+ self.send_error(404)
+ self.end_headers()
+
+ def log_message(self, format, *args):
+ # Route BaseHTTPRequestHandler access logs through pubsub instead of
+ # stderr, and only when verbose logging is enabled on the server.
+ if self.server.verbose_logging:
+ pub.sendMessage('log', msg=f"[CameraStreamer] {format % args}")
+
+ class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
+ allow_reuse_address = True
+ daemon_threads = True
+ output = self.output
+ verbose_logging = self.verbose_logging
+ resolution = self.resolution
+ fps = self.fps
+ server_ip = self.server_ip
+
+ try:
+ address = ('', self.port)
+ self.http_server = StreamingServer(address, StreamingHandler)
+ pub.sendMessage('log', msg=f"[CameraStreamer] HTTP server running at http://{self.server_ip}:{self.port}/")
+ pub.sendMessage('log', msg=f"[CameraStreamer] MJPEG stream available at http://{self.server_ip}:{self.port}/stream.mjpg")
+ self.http_server.serve_forever()
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"[CameraStreamer] HTTP server error: {e}")
+ self.running = False
+
+ def stop_streaming(self):
+ """Stop the camera and the MJPEG HTTP server, then release resources."""
+ if not self.running:
+ return
+
+ self.running = False
+ pub.sendMessage('log', msg="[CameraStreamer] Stopping stream")
+
+ # Stop the Picamera2 capture first so no more frames are produced.
+ if self.picam:
+ try:
+ self.picam.stop()
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"[CameraStreamer] Error stopping camera: {e}")
+ self.picam = None
+
+ # shutdown() stops serve_forever() running in the server thread.
+ if self.http_server:
+ try:
+ self.http_server.shutdown()
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"[CameraStreamer] Error shutting down HTTP server: {e}")
+ self.http_server = None
+
+ # Encourage prompt release of camera/server buffers.
+ gc.collect()
+ pub.sendMessage('log', msg="[CameraStreamer] Stream stopped")
diff --git a/modules/vision/remote/network/command_receiver.py b/modules/vision/remote/network/command_receiver.py
new file mode 100644
index 00000000..c2548be8
--- /dev/null
+++ b/modules/vision/remote/network/command_receiver.py
@@ -0,0 +1,819 @@
+import socket
+
+import json
+
+import threading
+
+import time
+
+import sys
+
+from pubsub import pub
+
+import traceback # Hata ayıklama için
+
+import logging # Use standard logging if available
+
+# Animation names handled by the on-robot servo Animate module.
+# handle_send_animation routes these via the 'animate' topic; any other
+# name is forwarded to the NeoPixel LED module via 'led:animate'.
+PI_SERVO_ANIMATIONS = {
+ "BOUNCE", "CELEBRATE", "HEAD_LEFT", "HEAD_NOD", "HEAD_NOD_ABS",
+ "HEAD_RIGHT", "HEAD_SHAKE", "HEAD_SHAKE_ABS", "LOOK_DOWN", "LOOK_UP",
+ "RAISED", "SCAN", "SIT", "SLEEP"
+}
+
+
+
+class CommandReceiver:
+
+ def __init__(self, **kwargs):
+
+ """
+
+ Receives commands from a remote computer (GUI) via socket and publishes
+
+ corresponding pubsub events for other robot modules to handle.
+
+
+
+ Configuration kwargs:
+
+ - port: Port number for command server (default: 8090)
+
+ - reuse_port: Allow port reuse (default: True, Linux only effective)
+
+ - autostart: Start the server automatically (default: True)
+
+ - max_retries: Max attempts to bind the port (default: 5)
+
+ - retry_delay: Delay between retry attempts in seconds (default: 1)
+
+ """
+
+ self.port = kwargs.get('port', 8090)
+
+ self.reuse_port = kwargs.get('reuse_port', True)
+
+ self.max_retries = kwargs.get('max_retries', 5)
+
+ self.retry_delay = kwargs.get('retry_delay', 1)
+
+ self.running = False
+
+ self.server_socket = None
+
+ self._client_threads = [] # tracks active client handler threads
+
+ self._lock = threading.Lock() # guards _client_threads (accept/cleanup/stop run on different threads)
+
+
+
+ # Maps wire command names to their handler methods.
+ self.handlers = {
+ 'send_animation': self.handle_send_animation,
+ 'motion_event': self.handle_motion_event,
+ 'set_processing_mode': self.handle_processing_mode,
+ 'log': self.handle_log,
+ 'get_status': self.handle_get_status,
+ 'set_state': self.handle_set_state,
+ 'set_eye': self.handle_set_eye,
+ 'servo_move': self.handle_servo_move,
+ }
+
+ pub.subscribe(self.stop_receiver, 'exit') # stop when the application shuts down
+
+
+
+ if kwargs.get('autostart', True):
+
+ # Start the server loop in a background daemon thread
+
+ thread = threading.Thread(target=self.start_receiver, daemon=True)
+
+ thread.start()
+
+
+
+ def start_receiver(self):
+
+ """Start the command receiver server.
+
+ Binds a TCP socket on 0.0.0.0:<port>, retrying up to max_retries times
+ (with a growing delay) when the address is already in use, then runs
+ the accept loop until stop_receiver() is called or a socket error occurs.
+ """
+
+ if self.running:
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Already running on port {self.port}")
+
+ return
+
+
+
+ self.running = True
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Starting command receiver on port {self.port}")
+
+
+
+ for attempt in range(self.max_retries):
+
+ try:
+
+ self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ # Socket reuse options (allow fast restart after a crash/stop)
+
+ self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ if self.reuse_port and hasattr(socket, 'SO_REUSEPORT'):
+
+ try:
+
+ self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+
+ except OSError:
+
+ pub.sendMessage('log:warning', msg="[CommandReceiver] SO_REUSEPORT not supported or failed.")
+
+
+
+ # 1s timeout so accept() cannot block forever and the loop can
+ # notice self.running being cleared
+
+ self.server_socket.settimeout(1.0)
+
+ self.server_socket.bind(('0.0.0.0', self.port))
+
+ self.server_socket.listen(5)
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Listening for commands on 0.0.0.0:{self.port}")
+
+
+
+ # Bind succeeded: enter the connection-accept loop
+
+ self._accept_connections()
+
+ return # returns when _accept_connections finishes or fails
+
+
+
+ except OSError as e:
+
+ if self.server_socket:
+
+ self.server_socket.close()
+
+ self.server_socket = None
+
+
+
+ if e.errno == 98: # EADDRINUSE: address already in use
+
+ if attempt < self.max_retries - 1:
+
+ wait_time = self.retry_delay * (attempt + 1)
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Port {self.port} in use. Retrying in {wait_time}s... (Attempt {attempt + 1}/{self.max_retries})")
+
+ time.sleep(wait_time)
+
+ else:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Port {self.port} still in use after {self.max_retries} attempts. Failed to start.")
+
+ self.running = False
+
+ return
+
+ else:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Socket OS Error on startup (bind/listen): {e}")
+
+ self.running = False
+
+ return
+
+ except Exception as e:
+
+ if self.server_socket:
+
+ self.server_socket.close()
+
+ self.server_socket = None
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error on startup: {e}")
+
+ pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+
+ self.running = False
+
+ return
+
+
+
+ # NOTE(review): every loop branch above returns, so this is effectively
+ # dead code kept as a safety net.
+ if not self.running:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Failed to start after all attempts.")
+
+
+
+ def _accept_connections(self):
+
+ """Accept incoming connections and handle clients in separate threads."""
+
+ while self.running:
+
+ client_socket = None
+
+ try:
+
+ # Bail out if the server socket was closed (e.g. by stop_receiver)
+
+ if not self.server_socket:
+
+ pub.sendMessage('log:info', msg="[CommandReceiver] Server socket is closed. Stopping accept loop.")
+
+ break
+
+
+
+ # accept() runs with the 1-second timeout set in start_receiver
+
+ client_socket, addr = self.server_socket.accept()
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Connection accepted from {addr}")
+
+
+
+ # TCP KeepAlive (optional; helps detect dropped connections)
+
+ try:
+
+ client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+ # Finer-grained keepalive tuning (Linux-specific options)
+
+ if hasattr(socket, "TCP_KEEPIDLE"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
+
+ if hasattr(socket, "TCP_KEEPINTVL"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10)
+
+ if hasattr(socket, "TCP_KEEPCNT"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
+
+ except OSError as ke_err:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Could not set KeepAlive options for {addr}: {ke_err}")
+
+
+
+
+
+ # One handler thread per client
+
+ client_thread = threading.Thread(target=self._handle_client, args=(client_socket, addr), daemon=True)
+
+ client_thread.start()
+
+ with self._lock:
+
+ self._client_threads.append(client_thread)
+
+ # Prune finished threads (optional housekeeping).
+ # NOTE(review): this must run OUTSIDE the `with self._lock` block
+ # above — _cleanup_threads() re-acquires the non-reentrant Lock;
+ # the mangled indentation makes the nesting ambiguous, verify.
+
+ self._cleanup_threads()
+
+
+
+ except socket.timeout:
+
+ # Timeouts are expected; loop again so self.running is re-checked
+
+ continue
+
+ except OSError as e:
+
+ # Raised when the socket is closed (stop_receiver) or on other OS errors
+
+ if self.running: # only log if we were not deliberately stopped
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] OS Error during accept(): {e}")
+
+ break # leave the loop on socket errors
+
+ except Exception as e:
+
+ if self.running:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error accepting connection: {e}")
+
+ pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+
+ if client_socket: client_socket.close() # close the socket if one was accepted
+
+ break # leave the loop on unexpected errors
+
+
+
+ pub.sendMessage('log:info', msg="[CommandReceiver] Accept loop finished.")
+
+ self.running = False # reflect that the server loop has ended
+
+
+
+ def _handle_client(self, client_socket, addr):
+
+ """Handle communication with a single connected client.
+
+ Reads newline-delimited JSON commands from the socket until the client
+ disconnects, an unrecoverable error occurs, or the receiver stops.
+ Always shuts down and closes the client socket on exit.
+ """
+
+ buffer_size = 4096
+
+ client_socket.settimeout(60.0) # allow the client up to 60s between sends
+
+ full_data = b""
+
+ separator = b'\n' # commands are newline-delimited
+
+
+
+ pub.sendMessage('log:debug', msg=f"[CommandReceiver] Handling client {addr}")
+
+
+
+ try:
+
+ while self.running:
+
+ try:
+
+ chunk = client_socket.recv(buffer_size)
+
+ if not chunk:
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Client {addr} disconnected.")
+
+ break # client closed the connection
+
+
+
+ full_data += chunk
+
+
+
+ # Process every complete (newline-terminated) command in the buffer
+
+ while separator in full_data:
+
+ command_part, full_data = full_data.split(separator, 1)
+
+ if command_part:
+
+ self._process_command(command_part, client_socket, addr)
+
+
+
+ except socket.timeout:
+
+ # No data within the timeout window; probe the connection.
+ # NOTE(review): send(b'') is a weak liveness check — it can
+ # succeed on a half-open connection; the TCP keepalive set in
+ # _accept_connections is the real safety net here.
+
+ try:
+
+ client_socket.send(b'')
+
+ except OSError:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Client {addr} connection lost after timeout check.")
+
+ break
+
+ continue # timeout is normal; keep listening
+
+ except ConnectionResetError:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Connection reset by client {addr}.")
+
+ break
+
+ except OSError as e:
+
+ # Socket error (e.g. read/write on a closed socket)
+
+ if self.running:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Socket OS Error with client {addr}: {e}")
+
+ break
+
+ except Exception as e:
+
+ if self.running:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error receiving from {addr}: {e}")
+
+ pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+
+ break # stop on any other error
+
+
+
+ except Exception as e:
+
+ # Unexpected failure at the handler-thread level
+
+ if self.running:
+
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Error in client handler thread for {addr}: {e}")
+
+ pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+
+ finally:
+
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] Closing connection with {addr}")
+
+ try:
+
+ client_socket.shutdown(socket.SHUT_RDWR)
+
+ except OSError:
+
+ pass # already closed or otherwise failing; ignore
+
+ except Exception as shut_e:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error during socket shutdown for {addr}: {shut_e}")
+
+ try:
+
+ client_socket.close()
+
+ except Exception as close_e:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error during socket close for {addr}: {close_e}")
+
+
+
+
+
+ def _process_command(self, data, client_socket, addr):
+
+ """Parse one raw command (JSON bytes) and send a JSON response back.
+
+ Decodes the newline-delimited payload, dispatches it through
+ process_command(), and writes the response (JSON + newline) to the
+ client socket. JSON/processing errors are reported to the client as
+ {'status': 'error', ...} instead of raising.
+ """
+ response_data = {'status': 'error', 'message': 'Unknown processing error'}
+ command_type = "unknown"
+ params = {}
+
+ try:
+ decoded_data = data.decode('utf-8').strip()
+ if not decoded_data:
+ return
+
+ command_data = json.loads(decoded_data)
+ command_type = command_data.get('command', 'unknown')
+ params = command_data.get('params', {})
+
+ pub.sendMessage('log:debug', msg=f"[CommandReceiver] DEBUG: Received RAW command data: {command_data}")
+ pub.sendMessage('log:info', msg=f"[CommandReceiver] RX from {addr}: CMD={command_type}, PARAMS={params}")
+
+ response_data = self.process_command({'CMD': command_type, 'PARAMS': params})
+
+ try:
+ response_json = json.dumps(response_data) + '\n'
+ client_socket.sendall(response_json.encode('utf-8'))
+ pub.sendMessage('log:debug', msg=f"[CommandReceiver] Response sent: {response_data}")
+ except Exception as send_err:
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending response: {send_err}")
+
+ except json.JSONDecodeError:
+ # Payload was not valid JSON; tell the client (log truncated to 100 chars)
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Invalid JSON: {data.decode('utf-8', errors='ignore')[:100]}")
+ response = {'status': 'error', 'message': 'Invalid JSON format'}
+ try:
+ client_socket.sendall((json.dumps(response) + '\n').encode('utf-8'))
+ except Exception as send_err:
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending JSON error response: {send_err}")
+
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Error processing command: {e}")
+ response = {'status': 'error', 'message': f"Internal error: {e}", 'command': command_type}
+ try:
+ client_socket.sendall((json.dumps(response) + '\n').encode('utf-8'))
+ except Exception as send_err:
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending error response: {send_err}")
+
+
+
+ def process_command(self, command_json):
+ """Process a command using registered handlers."""
+ command = command_json.get('CMD')
+ params = command_json.get('PARAMS', {})
+
+ if command in self.handlers:
+ try:
+ result = self.handlers[command](params)
+ return {'status': 'ok', 'message': result or f'{command} executed'}
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"Error handling command '{command}': {e}")
+ logging.exception(f"Error handling command '{command}'")
+ return {'status': 'error', 'message': str(e)}
+ else:
+ pub.sendMessage('log:warning', msg=f"Unknown command received: {command}")
+ return {'status': 'error', 'message': f'Unknown command: {command}'}
+
+
+
+ def handle_log(self, params):
+ """Handles log messages sent from the GUI."""
+ message = params.get('message', '')
+ level = params.get('level', 'info').lower()
+ log_msg = f"[GUI] {message}"
+
+ if level == 'error':
+ pub.sendMessage('log:error', msg=log_msg)
+ elif level == 'warning':
+ pub.sendMessage('log:warning', msg=log_msg)
+ else:
+ pub.sendMessage('log:info', msg=log_msg)
+ return "Log message received"
+
+
+
+ def handle_motion_event(self, params):
+ """Forwards motion event data from the GUI to the local 'motion_event' topic."""
+ pub.sendMessage('motion_event', data=params)
+ return "Motion event forwarded"
+
+
+
+ def handle_processing_mode(self, params):
+ """Forwards a processing-mode change request to 'set_processing_mode'."""
+ pub.sendMessage('set_processing_mode', data=params)
+ return "Processing mode change forwarded"
+
+
+ def handle_send_animation(self, params):
+ """
+ Forwards animation request via pubsub.
+ Servo animasyonları actuator/servo.py ve animate.py ile uyumlu şekilde,
+ NeoPixel animasyonları ise led:animate ile gönderilir.
+ """
+ animation_name = params.get('animation')
+ color = params.get('color')
+ color2 = params.get('color2')
+ repeat = params.get('repeat', 1)
+
+ if not animation_name:
+ pub.sendMessage('log:error', msg="[CommandReceiver] Missing animation name.")
+ return {"status": "error", "message": "Missing animation name"}
+
+ animation_name_upper = animation_name.upper()
+
+ if animation_name_upper in PI_SERVO_ANIMATIONS:
+ # Servo animasyonları actuator/servo.py ve animate.py ile uyumlu şekilde gönderilecek
+ try:
+ for _ in range(int(repeat)):
+ pub.sendMessage('animate', action=animation_name.lower())
+ log_msg = f"Servo animation sent to Animate module: {animation_name} (repeat={repeat})"
+ pub.sendMessage('log:info', msg=log_msg)
+ return {"status": "ok", "message": log_msg}
+ except Exception as e:
+ error_msg = f"Error routing servo animation: {e}"
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] {error_msg}")
+ pub.sendMessage('log:debug', msg=traceback.format_exc())
+ return {"status": "error", "message": error_msg}
+ else:
+ # NeoPixel animasyonlarını çalıştırmak için
+ try:
+ pub.sendMessage('led:animate',
+ animation=animation_name,
+ color=color,
+ color2=color2,
+ repeat=repeat)
+ log_msg = f"Forwarded to NeoPx module: {animation_name}"
+ pub.sendMessage('log:info', msg=log_msg)
+ return {"status": "ok", "message": log_msg}
+ except Exception as e:
+ error_msg = f"Error in NeoPx module: {e}"
+ pub.sendMessage('log:error', msg=f"[CommandReceiver] {error_msg}")
+ pub.sendMessage('log:debug', msg=traceback.format_exc())
+ return {"status": "error", "message": error_msg}
+
+
+ def handle_get_status(self, params):
+ """Ask the personality module to publish the robot's current status."""
+ pub.sendMessage('cmd:personality:get_status')
+ return "Status request forwarded"
+
+
+
+ def handle_set_state(self, params):
+ """Forward a state change ('state' key in params) to the personality module."""
+ pub.sendMessage('cmd:personality:set_state', state=params.get('state'))
+ return "Set state request forwarded"
+
+
+
+ def handle_set_eye(self, params):
+ """Forward an eye-color change ('color' key in params) to the personality module."""
+ pub.sendMessage('cmd:personality:set_eye', color=params.get('color'))
+ return "Set eye color request forwarded"
+
+
+
+ def handle_servo_move(self, params):
+ """Forward a raw servo move request to the 'servo:move' topic."""
+ pub.sendMessage('servo:move', data=params)
+ return "Servo move request forwarded"
+
+
+
+ def _cleanup_threads(self):
+
+ """Remove completed client threads from the list."""
+
+ with self._lock:
+
+ self._client_threads = [t for t in self._client_threads if t.is_alive()]
+
+
+
+ def stop_receiver(self):
+
+ """Stop the command receiver server."""
+
+ if not self.running:
+
+ return
+
+
+
+ pub.sendMessage('log:info', msg="[CommandReceiver] Stopping command receiver...")
+
+ self.running = False # Thread'lerin durması için işaret ver
+
+
+
+ # Sunucu soketini kapat (accept() bloklamasını kaldırabilir)
+
+ if self.server_socket:
+
+ server_socket_to_close = self.server_socket
+
+ self.server_socket = None # Tekrar kullanılmasını engelle
+
+ try:
+
+ server_socket_to_close.close()
+
+ pub.sendMessage('log:info', msg="[CommandReceiver] Server socket closed.")
+
+ except Exception as e:
+
+ pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error closing server socket: {e}")
+
+
+
+ # Aktif istemci thread'lerini bekle (kısa bir süre)
+
+ with self._lock:
+
+ threads_to_join = self._client_threads[:]
+
+ for thread in threads_to_join:
+
+ try:
+
+ thread.join(timeout=0.5) # Max 0.5 saniye bekle
+
+ except Exception as e:
+
+ pub.sendMessage('log:warning', f"[CommandReceiver] Error joining client thread: {e}")
+
+
+
+ self._cleanup_threads() # Son temizlik
+
+ pub.sendMessage('log:info', msg="[CommandReceiver] Stopped.")
+
+
+
+# Example usage (manual test harness when this file is run directly)
+
+if __name__ == '__main__':
+
+ from pubsub import pub
+
+ import logging
+
+ logging.basicConfig(level=logging.DEBUG)
+
+
+
+ # Simple log listener that prints pubsub log messages to stdout
+
+ class LogListener:
+
+ def __call__(self, msg, level='debug'):
+
+ print(f"[{level.upper()}] {msg}")
+
+ def info(self, msg): print(f"[INFO] {msg}")
+
+ def warning(self, msg): print(f"[WARNING] {msg}")
+
+ def error(self, msg): print(f"[ERROR] {msg}")
+
+ def debug(self, msg): print(f"[DEBUG] {msg}")
+
+
+
+ log_listener = LogListener()
+
+ pub.subscribe(log_listener.info, 'log:info')
+
+ pub.subscribe(log_listener.warning, 'log:warning')
+
+ pub.subscribe(log_listener.error, 'log:error')
+
+ pub.subscribe(log_listener.debug, 'log:debug')
+
+
+
+ # Simple listeners standing in for the real robot modules
+
+ def handle_servo(servo, value, absolute):
+
+ print(f"[SERVO HANDLER] Received: servo={servo}, value={value}, absolute={absolute}")
+
+ def handle_animate(action):
+
+ print(f"[ANIMATE HANDLER] Received action: {action}")
+
+ def handle_tts(text, lang):
+
+ print(f"[TTS HANDLER] Received text: '{text}', lang: {lang}")
+
+ def handle_buzzer(song):
+
+ print(f"[BUZZER HANDLER] Received song: {song}")
+
+ def handle_personality_set(name):
+
+ print(f"[PERSONALITY HANDLER] Set personality: {name}")
+
+ def handle_pers_get_list():
+
+ print(f"[PERSONALITY HANDLER] Get list requested")
+
+ # In the real application this would pub.sendMessage('data:personalities', ...)
+
+ def handle_pers_set_eye(color):
+
+ print(f"[PERSONALITY HANDLER] Set eye: {color}")
+
+ def handle_pers_set_state(state):
+
+ print(f"[PERSONALITY HANDLER] Set state: {state}")
+
+ def handle_pers_get_status():
+
+ print(f"[PERSONALITY HANDLER] Get status requested")
+
+ # In the real application this would pub.sendMessage('data:robot_status', ...)
+
+
+
+ pub.subscribe(handle_servo, 'cmd:servo')
+
+ pub.subscribe(handle_animate, 'cmd:animate')
+
+ pub.subscribe(handle_tts, 'cmd:tts')
+
+ pub.subscribe(handle_buzzer, 'cmd:buzzer')
+
+ pub.subscribe(handle_personality_set, 'cmd:personality:set')
+
+ pub.subscribe(handle_pers_get_list, 'cmd:personality:get_list')
+
+ pub.subscribe(handle_pers_set_eye, 'cmd:personality:set_eye')
+
+ pub.subscribe(handle_pers_set_state, 'cmd:personality:set_state')
+
+ pub.subscribe(handle_pers_get_status, 'cmd:personality:get_status')
+
+
+
+
+
+ print("Starting Command Receiver for testing...")
+
+ receiver = CommandReceiver(port=8090, autostart=False)
+
+ receiver.start_receiver() # runs inline (blocking), not in a separate thread
+
+
+
+ print("Command Receiver running. Press Ctrl+C to stop.")
+
+ try:
+
+ while receiver.running:
+
+ time.sleep(1)
+
+ except KeyboardInterrupt:
+
+ print("\nCtrl+C received, stopping...")
+
+ receiver.stop_receiver()
+
+ print("Test finished.")
+
diff --git a/modules/vision/remote/network/robot_to_gui_sender.py b/modules/vision/remote/network/robot_to_gui_sender.py
new file mode 100644
index 00000000..6d55bb49
--- /dev/null
+++ b/modules/vision/remote/network/robot_to_gui_sender.py
@@ -0,0 +1,386 @@
+import socket
+import json
+import threading
+import time
+from pubsub import pub
+import traceback
+
+class RobotToGuiSender:
+ def __init__(self, **kwargs):
+ """
+ Listens to specific pubsub topics on the robot and forwards the data
+ to the remote GUI listener over a socket connection.
+
+ Configuration kwargs:
+ - gui_ip: IP address of the computer running the GUI (required).
+ - gui_port: Port number the GUI listener is running on (required).
+ - reconnect_delay: Delay in seconds before attempting to reconnect (default: 5).
+ - forward_topics: List of pubsub topics to listen to and forward (required).
+ - autostart: Start the sender automatically (default: True).
+ """
+ self.gui_ip = kwargs.get('gui_ip')
+ self.gui_port = kwargs.get('gui_port')
+ self.reconnect_delay = kwargs.get('reconnect_delay', 5)
+ self.forward_topics = kwargs.get('forward_topics', [])
+ self.running = False
+ self.connected = False
+ self.client_socket = None
+ self._send_lock = threading.Lock() # serializes all writes to the socket
+ self._thread = None
+ # Set early (unfiltered) so the attribute exists even on the
+ # missing-config early return below; re-filtered further down.
+ self.data_topics_to_forward = kwargs.get('forward_topics', [])
+
+ # --- Validate GUI IP and port ---
+ if not self.gui_ip or not self.gui_port:
+ # Use print directly: publishing to a log topic could loop back into us
+ print("[GuiSender] ERROR: GUI IP address and Port are required in configuration!")
+ self._autostart = False
+ # Raising might be better than loading the module in a dead state:
+ # raise ValueError("GUI IP and Port configuration missing for RobotToGuiSender")
+ return
+ else:
+ self._autostart = kwargs.get('autostart', True)
+
+ # --- PubSub subscriptions ---
+ # NOTE: deliberately NOT subscribing to log:* topics — forwarding our own
+ # log output would create a feedback loop. Only data topics are forwarded.
+ self.data_topics_to_forward = [t for t in self.forward_topics if not t.startswith('log:')]
+
+ if self.data_topics_to_forward:
+ for topic in self.data_topics_to_forward:
+ try:
+ # One shared callback: handle_message branches on the topic
+ # name (per-topic handlers could be registered here instead).
+ pub.subscribe(self.handle_message, topic)
+ print(f"[GuiSender] INFO: Subscribed to data topic: {topic}")
+ except Exception as e:
+ print(f"[GuiSender] ERROR: Failed to subscribe to topic {topic}: {e}")
+ else:
+ print("[GuiSender] WARNING: No non-log topics specified to forward in configuration.")
+
+ # --- Exit signal ---
+ pub.subscribe(self.stop_sender, 'exit')
+
+ # --- Autostart ---
+ if self._autostart:
+ self.start_sender()
+
+ def start_sender(self):
+ """Starts the sender thread which tries to connect and send data."""
+ if self.running:
+ print("[GuiSender] INFO: Sender already running.")
+ return
+
+ self.running = True
+ self._thread = threading.Thread(target=self._run, daemon=True)
+ self._thread.start()
+ print(f"[GuiSender] INFO: Started. Attempting to connect to GUI at {self.gui_ip}:{self.gui_port}")
+
+ def _connect(self):
+ """Attempt to establish a TCP connection to the GUI listener.
+
+ Returns True when connected (or already connected), False otherwise.
+ On success, sends a 'hello_from_robot' handshake message.
+ """
+ if self.connected:
+ return True
+
+ print(f"[GuiSender] DEBUG: Attempting connection to {self.gui_ip}:{self.gui_port}...")
+ try:
+ if self.client_socket:
+ try: self.client_socket.close()
+ except: pass # ignore errors while closing a stale socket
+
+ self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.client_socket.settimeout(5.0)
+ self.client_socket.connect((self.gui_ip, self.gui_port))
+ self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ # Further keepalive tuning (TCP_KEEPIDLE etc.) could go here.
+
+ self.connected = True
+ print(f"[GuiSender] INFO: Connected successfully to GUI at {self.gui_ip}:{self.gui_port}")
+ # Handshake message. It does not pass through handle_message,
+ # so it cannot cause a pubsub feedback loop.
+ self._send_data({'command': 'hello_from_robot', 'params': {'message': 'Robot connected'}})
+ return True
+ except socket.timeout:
+ # Log via print (not pubsub) to avoid log-forwarding loops
+ print(f"[GuiSender] WARNING: Connection attempt timed out to {self.gui_ip}:{self.gui_port}.")
+ if self.client_socket: self.client_socket.close()
+ self.client_socket = None
+ self.connected = False
+ return False
+ except OSError as e:
+ print(f"[GuiSender] WARNING: Connection OS Error to {self.gui_ip}:{self.gui_port} - {e}")
+ if self.client_socket: self.client_socket.close()
+ self.client_socket = None
+ self.connected = False
+ return False
+ except Exception as e:
+ print(f"[GuiSender] ERROR: Unexpected error connecting to GUI: {e}")
+ print(f"[GuiSender] DEBUG: Traceback: {traceback.format_exc()}")
+ if self.client_socket: self.client_socket.close()
+ self.client_socket = None
+ self.connected = False
+ return False
+
+ def _run(self):
+ """Main loop for the sender thread: keep the GUI connection alive.
+
+ Reconnects (with reconnect_delay) after failures and periodically probes
+ the socket so dropped connections are detected and re-established.
+ """
+ while self.running:
+ if not self.connected:
+ if not self._connect():
+ if self.running:
+ print(f"[GuiSender] INFO: Connection failed. Retrying in {self.reconnect_delay} seconds...")
+ time.sleep(self.reconnect_delay)
+ continue
+
+ # Connected: watch for the connection dropping
+ try:
+ time.sleep(1) # short sleep to avoid spinning the CPU
+ # Optional keepalive probe (could run more frequently)
+ if self.running and self.connected and self.client_socket:
+ # Probe roughly every 5 seconds by sending zero bytes.
+ # NOTE(review): int(time.time()) % 5 with a 1s sleep can skip
+ # or double-fire probes; a monotonic "next probe at" timestamp
+ # would be more precise.
+ if int(time.time()) % 5 == 0:
+ with self._send_lock:
+ if self.client_socket: # re-check under the lock
+ self.client_socket.send(b'')
+ except (OSError, BrokenPipeError):
+ # The keepalive probe found a dead connection
+ print("[GuiSender] WARNING: Connection lost (detected by keepalive). Will reconnect.")
+ self.connected = False
+ if self.client_socket:
+ try: self.client_socket.close()
+ except: pass
+ self.client_socket = None
+ except Exception as e:
+ print(f"[GuiSender] ERROR: Error during keepalive/wait loop: {e}")
+ self.connected = False
+ if self.client_socket:
+ try: self.client_socket.close()
+ except: pass
+ self.client_socket = None
+
+
+ # Loop ended (running cleared): final socket cleanup
+ if self.client_socket:
+ print("[GuiSender] INFO: Closing connection to GUI.")
+ try:
+ with self._send_lock:
+ socket_to_close = self.client_socket
+ self.client_socket = None
+ self.connected = False
+ if socket_to_close: socket_to_close.close()
+ except Exception as e:
+ print(f"[GuiSender] WARNING: Error closing socket: {e}")
+ self.connected = False
+ print("[GuiSender] INFO: Run loop finished.")
+
+
+ def handle_message(self, topic=pub.AUTO_TOPIC, status=None, personalities=None, **kwargs):
+ """
+ Callback for subscribed DATA pubsub topics.
+
+ pub.AUTO_TOPIC injects the actual Topic object so one callback can serve
+ several subscriptions. Known payload arguments ('status', 'personalities')
+ are named explicitly; anything else lands in **kwargs. Messages are
+ dropped while the sender is not running or not connected.
+ """
+ if not self.running or not self.connected:
+ return
+
+ topic_name = topic.getName()
+ data_to_send = None
+
+ if topic_name == 'data:robot_status':
+ # 'status' is an explicit argument; no need to dig it out of kwargs
+ if status:
+ # Change detection: compare against the previously sent status
+ if hasattr(self, '_last_status'):
+ # Skip the send when state and eye color are both unchanged
+ if (self._last_status.get('state') == status.get('state') and
+ self._last_status.get('eye_color') == status.get('eye_color')):
+ return # nothing changed; do not send
+
+ # Remember the status (as a copy) and forward the change
+ self._last_status = status.copy()
+ data_to_send = {
+ 'command': 'update_status',
+ 'params': status
+ }
+ else:
+ print(f"[GuiSender] WARNING: Received 'data:robot_status' but 'status' argument was None.")
+
+ # Other subscribed 'data:' topics are forwarded generically via kwargs
+ elif topic_name in self.data_topics_to_forward:
+ print(f"[GuiSender] WARNING: Received subscribed data topic '{topic_name}' - passing kwargs.")
+ # Generic envelope carrying whatever extra kwargs arrived
+ data_to_send = {
+ 'command': 'generic_robot_event',
+ 'params': {'topic': topic_name, 'data': kwargs} # only the leftover kwargs
+ }
+
+ if data_to_send:
+ self._send_data(data_to_send)
+
+ def _send_data(self, data_dict):
+ """Send one dict to the GUI as a newline-terminated JSON message.
+
+ Returns True on success, False when not connected or on any send error.
+ On a connection error the socket is closed and 'connected' cleared so
+ the run loop reconnects.
+ """
+ # handle_message already checks connectivity, but double-check here
+ if not self.connected or not self.client_socket:
+ return False
+
+ try:
+ json_data = json.dumps(data_dict) + '\n'
+ encoded_data = json_data.encode('utf-8')
+
+ with self._send_lock:
+ # Re-check under the lock; another thread may have disconnected us
+ if not self.connected or not self.client_socket: return False
+ self.client_socket.sendall(encoded_data)
+ return True
+
+ except (BrokenPipeError, ConnectionResetError, OSError) as e:
+ # Print (not pubsub) to avoid any chance of a logging feedback loop
+ print(f"[GuiSender] WARNING: Connection error while sending data: {e}. Marking as disconnected.")
+ self.connected = False
+ if self.client_socket:
+ try: self.client_socket.close()
+ except: pass
+ self.client_socket = None
+ return False
+ except Exception as e:
+ print(f"[GuiSender] ERROR: Unexpected error sending data: {e}")
+ print(f"[GuiSender] DEBUG: Traceback: {traceback.format_exc()}")
+ self.connected = False # treat the connection as broken on any error
+ if self.client_socket:
+ try: self.client_socket.close()
+ except: pass
+ self.client_socket = None
+ return False
+
def stop_sender(self):
    """Stops the sender thread and closes the connection."""
    if not self.running:
        return

    print("[GuiSender] INFO: Stopping sender...")
    self.running = False

    # Give the worker thread a short window to finish on its own.
    worker = self._thread
    if worker and worker.is_alive():
        try:
            worker.join(timeout=1.0)
            if worker.is_alive():
                print("[GuiSender] WARNING: Sender thread did not terminate gracefully.")
        except Exception as e:
            print(f"[GuiSender] WARNING: Error joining sender thread: {e}")

    # Final socket close, done under the send lock so no send races it.
    if self.client_socket:
        try:
            with self._send_lock:
                sock, self.client_socket = self.client_socket, None
                self.connected = False
                if sock:
                    sock.close()
        except Exception as e:
            print(f"[GuiSender] WARNING: Error during final socket close: {e}")

    print("[GuiSender] INFO: Stopped.")
+
# Example usage (for standalone testing of RobotToGuiSender).
if __name__ == '__main__':
    print("--- RobotToGuiSender Test Başlatılıyor ---")

    # Minimal TCP server that simulates the GUI's listener: accepts one
    # connection at a time and prints each newline-delimited JSON message.
    def gui_listener_simulator(port):
        print(f"[SIMULATOR] GUI listener simulator başlatılıyor (Port: {port})...")
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            server_socket.bind(('0.0.0.0', port))
            server_socket.listen(1)
            while True:
                print("[SIMULATOR] Robot bağlantısı bekleniyor...")
                client_socket, addr = server_socket.accept()
                print(f"[SIMULATOR] Robot bağlandı: {addr}")
                try:
                    buffer = b""
                    separator = b'\n'
                    while True:
                        chunk = client_socket.recv(1024)
                        if not chunk:
                            print("[SIMULATOR] Robot bağlantısı kapandı.")
                            break
                        buffer += chunk
                        # Split the byte stream on '\n' message boundaries.
                        while separator in buffer:
                            line, buffer = buffer.split(separator, 1)
                            if line:
                                print(f"[SIMULATOR] Robot'tan Alındı: {line.decode('utf-8').strip()}")
                except ConnectionResetError:
                    print("[SIMULATOR] Robot bağlantısı sıfırlandı.")
                except Exception as e:
                    print(f"[SIMULATOR] Veri alım hatası: {e}")
                finally:
                    # Best-effort close (was a bare except; narrowed).
                    try:
                        client_socket.close()
                    except Exception:
                        pass
                    print("[SIMULATOR] Robot soketi kapatıldı.")
        except Exception as e:
            print(f"[SIMULATOR] Listener hatası: {e}")
        finally:
            try:
                server_socket.close()
            except Exception:
                pass
            print("[SIMULATOR] Listener durduruldu.")

    gui_port = 8091
    simulator_thread = threading.Thread(target=gui_listener_simulator, args=(gui_port,), daemon=True)
    simulator_thread.start()
    time.sleep(1)  # give the simulator time to start listening

    print("RobotToGuiSender test için başlatılıyor...")
    gui_ip = "127.0.0.1"  # or the IP the GUI runs on
    # Forward ONLY data topics:
    sender = RobotToGuiSender(
        gui_ip=gui_ip,
        gui_port=gui_port,
        forward_topics=['data:robot_status', 'data:other_event'],  # data topics only
        autostart=False
    )
    sender.start_sender()

    # Separate handler for pubsub log messages (independent of GuiSender).
    class PrintLogListener:
        def __call__(self, msg, level='debug', **kwargs):
            print(f"[PUBSUB LOG - {level.upper()}] {msg}")
    log_listener = PrintLogListener()
    pub.subscribe(log_listener, pub.ALL_TOPICS)  # catch every topic

    print("Sender çalışıyor. Test mesajları yayınlanıyor...")
    try:
        count = 0
        while True:
            count += 1
            # Publish LOG messages (GuiSender must NOT forward these;
            # only PrintLogListener should print them).
            pub.sendMessage('log:info', msg=f"Bu bir info mesajı #{count}")
            if count % 3 == 0:
                pub.sendMessage('log:error', msg=f"Bu bir HATA mesajı #{count//3}")

            # Publish DATA messages (GuiSender MUST forward these).
            if count % 5 == 0:
                status = {'state': f'STATE_{count%4}', 'eye_color': ['blue','red','green'][count%3], 'timestamp': time.time()}
                pub.sendMessage('data:robot_status', status=status)
                print(f"*** 'data:robot_status' yayınlandı (Count: {count}) ***")
            if count % 7 == 0:
                # FIX: restored mojibake literal "BaÅŸka" (UTF-8 text decoded
                # as Latin-1) to the intended "Başka".
                pub.sendMessage('data:other_event', data=f"Başka veri {count}")
                print(f"*** 'data:other_event' yayınlandı (Count: {count}) ***")

            time.sleep(2)
    except KeyboardInterrupt:
        print("\nCtrl+C alındı, sender durduruluyor...")
        sender.stop_sender()

    # No join needed: the simulator thread is a daemon and dies with us.
    print("Test bitti.")
diff --git a/modules/vision/remote/tracking.py b/modules/vision/remote/tracking.py
new file mode 100644
index 00000000..bfaa6efe
--- /dev/null
+++ b/modules/vision/remote/tracking.py
@@ -0,0 +1,62 @@
+from pubsub import pub
+from threading import Thread
+
+class Tracking:
+ TRACKING_THRESHOLD = 30
+ TRACKING_MOVE_PERCENT = 10
+
+ def __init__(self, vision, **kwargs):
+ self.vision = vision
+ self.active = kwargs.get('active', False)
+ self.use_thread = kwargs.get('thread', False)
+ if not self.use_thread:
+ pub.subscribe(self.loop, 'loop')
+ pub.subscribe(self.set_state, 'rest', active=True)
+ pub.subscribe(self.set_state, 'wake', active=True)
+ pub.subscribe(self.set_state, 'sleep', active=False)
+ pub.subscribe(self.set_state, 'exit', active=False)
+
+ def set_state(self, active):
+ self.active = active
+ if active and self.use_thread:
+ Thread(target=self.loop_thread, args=()).start()
+
+ def loop_thread(self):
+ while self.active:
+ self.track_largest_match()
+
+ def loop(self):
+ if not self.active:
+ return
+ self.track_largest_match()
+
+ def track_largest_match(self):
+ largest = self._largest(self.vision.detect())
+ pub.sendMessage('tracking:match', largest=largest, screen=getattr(self.vision, 'dimensions', (640,480)))
+ if largest is None:
+ return False
+ (x, y, w, h) = largest
+ x_move = Tracking.calc_move_amount(getattr(self.vision, 'dimensions', (640,))[0], x, w)
+ y_move = Tracking.calc_move_amount(getattr(self.vision, 'dimensions', (480,))[0], y, h)
+ if x_move:
+ pub.sendMessage('servo:pan:mv', percentage=x_move)
+ if y_move:
+ pub.sendMessage('servo:tilt:mv', percentage=-y_move)
+ return True
+
+ @staticmethod
+ def calc_move_amount(screen_w, target_pos, target_w):
+ screen_c = screen_w / 2
+ target_c = target_pos + (target_w / 2)
+ if abs(screen_c - target_c) > Tracking.TRACKING_THRESHOLD:
+ return round((screen_c - target_c) / Tracking.TRACKING_MOVE_PERCENT)
+ return 0
+
+ def _largest(self, matches):
+ largest = None
+ if matches:
+ for match in matches:
+ if largest is None or self.vision.get_area(match) > self.vision.get_area(largest):
+ largest = match
+ return largest
+ return None
diff --git a/modules/vision/remote/train_model.py b/modules/vision/remote/train_model.py
new file mode 100644
index 00000000..542e0263
--- /dev/null
+++ b/modules/vision/remote/train_model.py
@@ -0,0 +1,250 @@
+from imutils import paths
+import face_recognition
+import pickle
+import cv2
+import os
+import shutil
+from pubsub import pub
+import time
+import json
+import numpy as np
+
class TrainModel:
    """Builds the face-recognition model from images under the dataset
    directory and can capture new face samples from a webcam.

    Dataset layout: ``<dataset>/<person_name>/<image>.jpg``.
    """

    def __init__(self, **kwargs):
        # dataset: root directory holding one sub-directory per person.
        self.path = kwargs.get('dataset', 'dataset')
        # output: pickle file receiving {"encodings": [...], "names": [...]}.
        self.output = kwargs.get('output', 'encodings.pickle')
        self.model = kwargs.get('detection_method', 'hog')  # 'hog' or 'cnn'
        self.jitters = kwargs.get('jitters', 1)  # higher values are more accurate but slower
        # Optional callable(progress_percent, image_path) for UI feedback.
        self.progress_callback = kwargs.get('progress_callback', None)

    def ensure_dataset_exists(self):
        """Check the dataset directory exists and create it if missing."""
        if not os.path.exists(self.path):
            os.makedirs(self.path)
            pub.sendMessage('log', msg=f'[TrainModel] Created dataset directory: {self.path}')

    def train(self):
        """Train the face-recognition model.

        Walks every image in the dataset, extracts 128-d face encodings,
        filters out invalid ones, and atomically writes the result to
        ``self.output``. Returns the number of encodings saved (0 when
        nothing could be processed).
        """
        self.ensure_dataset_exists()

        # Collect the image files inside the dataset.
        imagePaths = list(paths.list_images(self.path))
        if len(imagePaths) < 1:
            pub.sendMessage('log', msg='[TrainModel] Nothing to process - no images found in dataset')
            return 0

        pub.sendMessage('log', msg=f'[TrainModel] Start processing {len(imagePaths)} images...')
        knownEncodings = []
        knownNames = []
        success_count = 0
        fail_count = 0

        # Process each image in turn.
        for (i, imagePath) in enumerate(imagePaths):
            # Skip macOS AppleDouble resource-fork artifacts.
            if '.AppleDouble' in imagePath:
                continue

            # Progress update for the optional callback.
            if self.progress_callback:
                progress = int((i / len(imagePaths)) * 100)
                self.progress_callback(progress, imagePath)

            pub.sendMessage('log', msg=f'[TrainModel] Processing image {i + 1}/{len(imagePaths)} - {imagePath}')

            # Person name comes from the path (dataset/person_name/image.jpg).
            name = imagePath.split(os.path.sep)[-2]

            # Read the image from disk.
            image = cv2.imread(imagePath)
            if image is None:
                pub.sendMessage('log', msg=f'[TrainModel] Warning: Could not read image {imagePath}')
                fail_count += 1
                continue

            # Convert OpenCV's BGR to RGB (face_recognition expects RGB).
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Detect faces directly with face_recognition.face_locations
            # rather than a Haar cascade, for more accurate results.
            try:
                boxes = face_recognition.face_locations(rgb, model=self.model)

                if len(boxes) == 0:
                    pub.sendMessage('log', msg=f'[TrainModel] Warning: No face detected in {imagePath}')
                    fail_count += 1
                    continue

                # Compute the encodings for the detected faces.
                encodings = face_recognition.face_encodings(rgb, boxes, num_jitters=self.jitters)

                # Check for valid encodings and filter out invalid ones.
                valid_encodings = []
                for encoding in encodings:
                    # Check for NaN values which cause comparison issues.
                    if np.isnan(encoding).any():
                        pub.sendMessage('log', msg=f'[TrainModel] Warning: NaN values in encoding for {imagePath}')
                        continue

                    # Check encoding dimension (dlib embeddings are 128-d).
                    if len(encoding) != 128:
                        pub.sendMessage('log', msg=f'[TrainModel] Warning: Incorrect encoding dimension for {imagePath}')
                        continue

                    # Check for reasonable value ranges (-1 to 1).
                    if np.min(encoding) < -1.1 or np.max(encoding) > 1.1:
                        pub.sendMessage('log', msg=f'[TrainModel] Warning: Encoding values out of range for {imagePath}')
                        continue

                    valid_encodings.append(encoding)

                if not valid_encodings:
                    pub.sendMessage('log', msg=f'[TrainModel] Warning: No valid encodings for {imagePath}')
                    fail_count += 1
                    continue

                # Add all valid encodings to our lists.
                for encoding in valid_encodings:
                    knownEncodings.append(encoding)
                    knownNames.append(name)
                success_count += 1

            except Exception as e:
                pub.sendMessage('log', msg=f'[TrainModel] Error processing {imagePath}: {e}')
                fail_count += 1
                continue

        # Warn if no encoding at all could be produced.
        if len(knownEncodings) == 0:
            pub.sendMessage('log', msg='[TrainModel] No faces were detected in any images')
            return 0

        # Serialize the data in pickle format.
        pub.sendMessage('log', msg=f'[TrainModel] Serializing {len(knownEncodings)} encodings...')
        data = {"encodings": knownEncodings, "names": knownNames}

        # Write to a temp file first and move into place afterwards, so a
        # failed write cannot corrupt an existing model file.
        temp_output = f"{self.output}.tmp"
        with open(temp_output, "wb") as f:
            f.write(pickle.dumps(data))

        # If the save succeeded, move the temp file to the real filename.
        if os.path.exists(temp_output):
            if os.path.exists(self.output):
                # Back up the previous model first.
                backup = f"{self.output}.bak"
                try:
                    shutil.copy2(self.output, backup)
                except Exception as e:
                    pub.sendMessage('log', msg=f'[TrainModel] Warning: Could not backup old model: {e}')

            # Move the new file into place.
            shutil.move(temp_output, self.output)
            pub.sendMessage('log', msg=f'[TrainModel] Saved encodings to {self.output}')

        # Report how many distinct persons the model now contains.
        unique_names = set(knownNames)
        pub.sendMessage('log', msg=f'[TrainModel] Model contains {len(unique_names)} unique persons: {", ".join(unique_names)}')

        return len(knownEncodings)

    def add_faces_from_webcam(self, person_name, num_images=5, camera_id=0):
        """Collect face samples for one person from a webcam.

        Opens an OpenCV preview window; 'c' or space captures a frame when a
        face is visible, 'q' aborts. Returns True when at least one image
        was saved under ``<dataset>/<person_name>/``.
        """
        if not person_name or person_name.strip() == "":
            pub.sendMessage('log', msg='[TrainModel] Error: Person name cannot be empty')
            return False

        # Sanitize the name: keep only alphanumerics, underscore and space.
        person_name = "".join(c for c in person_name if c.isalnum() or c in "_ ").strip()
        if not person_name:
            pub.sendMessage('log', msg='[TrainModel] Error: Person name contains only invalid characters')
            return False

        # Make sure the dataset directory exists.
        self.ensure_dataset_exists()

        # Create the per-person directory.
        person_dir = os.path.join(self.path, person_name)
        if not os.path.exists(person_dir):
            os.makedirs(person_dir)

        # Open the camera.
        pub.sendMessage('log', msg=f'[TrainModel] Opening camera for {person_name}...')
        cap = cv2.VideoCapture(camera_id)
        if not cap.isOpened():
            pub.sendMessage('log', msg='[TrainModel] Error: Could not open camera')
            return False

        # Configure the capture size.
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        images_captured = 0
        delay_frames = 5  # only run detection every 5th frame (sample variety)
        frame_count = 0

        pub.sendMessage('log', msg=f'[TrainModel] Ready to capture {num_images} images for {person_name}')

        while images_captured < num_images:
            ret, frame = cap.read()
            if not ret:
                pub.sendMessage('log', msg='[TrainModel] Error reading from camera')
                break

            # Mirror the image (more natural for the user).
            frame = cv2.flip(frame, 1)

            # Run detection only every `delay_frames` frames, not every frame.
            frame_count += 1
            if frame_count % delay_frames != 0:
                # Overlay the capture status text.
                text = f"Capturing for: {person_name} ({images_captured}/{num_images})"
                cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                cv2.imshow('Capture Face', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                continue

            # Face detection on this frame.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            # Draw boxes and show the frame.
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

            text = f"Capturing for: {person_name} ({images_captured}/{num_images})"
            cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            cv2.imshow('Capture Face', frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break
            elif key == ord('c') or key == ord(' '):  # capture on 'c' or space
                # Only capture when a face was actually detected.
                if len(faces) > 0:
                    # Pick the largest face (when several are present).
                    # NOTE(review): largest_face is computed but the FULL frame
                    # is saved below, not the cropped face - confirm intended.
                    largest_face = max(faces, key=lambda rect: rect[2] * rect[3])

                    # Save the image.
                    timestamp = int(time.time() * 1000)
                    filename = os.path.join(person_dir, f"{timestamp}.jpg")
                    cv2.imwrite(filename, frame)

                    images_captured += 1
                    pub.sendMessage('log', msg=f'[TrainModel] Captured image {images_captured}/{num_images}')

                    # Short pause between captures.
                    time.sleep(0.5)
                else:
                    pub.sendMessage('log', msg='[TrainModel] No face detected, try again')

        # Clean up and close the preview window.
        cap.release()
        cv2.destroyAllWindows()

        pub.sendMessage('log', msg=f'[TrainModel] Captured {images_captured} images for {person_name}')
        return images_captured > 0
diff --git a/modules/vision/remote/video_stream.py b/modules/vision/remote/video_stream.py
new file mode 100644
index 00000000..3966eb9e
--- /dev/null
+++ b/modules/vision/remote/video_stream.py
@@ -0,0 +1,282 @@
+import cv2
+from threading import Thread
+from pubsub import pub
+import platform
+import os
+import time
+import numpy as np
+
class VideoStream:
    """Threaded frame grabber abstracting several camera backends.

    ``source_type`` selects the backend: "remote" (HTTP MJPEG), "gstreamer",
    "libcamera", "picamera" (picamera2), or anything else for plain OpenCV.
    Failed backends fall back along gstreamer -> libcamera -> picamera ->
    opencv. The latest frame is exposed via read(); a placeholder
    "No Camera Feed" image is shown until a real frame arrives.
    """

    def __init__(self, source_type="webcam", **kwargs):
        # Configure environment variables for camera backends.
        if 'arm' in platform.machine():
            os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0"
            os.environ["OPENCV_VIDEOIO_PRIORITY_V4L"] = "1"  # Use V4L directly when not using GStreamer
            os.environ["OPENCV_VIDEOIO_PRIORITY_V4L2"] = "1"

            # Enable GStreamer - important for libcamerasrc.
            os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "1"

        self.source_type = source_type
        self.stopped = False   # set by stop(); makes the update() loop exit
        self.frame = None      # most recent frame (BGR numpy array)
        self.grabbed = False   # whether the last read succeeded
        self.resolution = kwargs.get('resolution', (640, 480))

        # Create a blank placeholder frame in case initialization fails.
        self.frame = np.zeros((self.resolution[1], self.resolution[0], 3), dtype=np.uint8)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(self.frame, "No Camera Feed", (50, self.resolution[1]//2),
                    font, 1, (255, 255, 255), 2, cv2.LINE_AA)

        # Subscribe to module events.
        pub.subscribe(self.stop, 'exit')
        pub.subscribe(self.update_pipeline, 'camera:set_pipeline')

        # Initialize camera based on source type.
        self._initialize_camera(kwargs)

    def _initialize_camera(self, kwargs):
        """Initialize the camera based on source type and parameters"""
        if self.source_type == "remote":
            # Connect to a remote MJPEG stream over HTTP.
            self.remote_source = kwargs.get('remote_source', 'http://localhost:8000/stream.mjpg')
            pub.sendMessage('log', msg=f"[VideoStream] Connecting to remote stream: {self.remote_source}")

            # Set OpenCV parameters for better MJPEG streaming reliability.
            # NOTE(review): this option configures RTSP transport while the
            # source is an HTTP MJPEG URL - confirm it has any effect here.
            os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"

            self.stream = cv2.VideoCapture(self.remote_source)

            # Configure stream parameters.
            self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # Use a small buffer to reduce latency

            # Retry the first read a few times before giving up.
            for retry in range(3):
                self.grabbed, frame = self.stream.read()
                if self.grabbed and frame is not None:
                    self.frame = frame
                    pub.sendMessage('log', msg=f"[VideoStream] Successfully connected to remote stream (attempt {retry+1})")
                    break
                else:
                    pub.sendMessage('log', msg=f"[VideoStream] Failed to connect to remote stream (attempt {retry+1})")
                    time.sleep(1)

            if not self.grabbed:
                pub.sendMessage('log', msg="[VideoStream] All connection attempts to remote stream failed")
        elif self.source_type == "gstreamer":
            # GStreamer pipeline support.
            gst_pipeline = kwargs.get('gst_pipeline', '')

            # If no pipeline specified, create one using libcamerasrc.
            if not gst_pipeline:
                width, height = self.resolution
                framerate = kwargs.get('framerate', 30)
                gst_pipeline = (
                    f"libcamerasrc ! video/x-raw, width={width}, height={height}, "
                    f"framerate={framerate}/1 ! videoconvert ! appsink"
                )

            pub.sendMessage('log', msg=f"[VideoStream] Using GStreamer pipeline: {gst_pipeline}")
            self.stream = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER)

            if self.stream.isOpened():
                pub.sendMessage('log', msg="[VideoStream] GStreamer pipeline opened successfully")
                self.grabbed, frame = self.stream.read()
                if self.grabbed and frame is not None:
                    self.frame = frame
                    pub.sendMessage('log', msg="[VideoStream] First frame captured from GStreamer")
                else:
                    pub.sendMessage('log', msg="[VideoStream] Failed to read frame from GStreamer")
                    # Fall back to another method.
                    self.source_type = "libcamera"
                    self.stream.release()
                    self._initialize_libcamera(kwargs)
            else:
                pub.sendMessage('log', msg="[VideoStream] Failed to open GStreamer pipeline")
                # Fall back to another method.
                self.source_type = "libcamera"
                self._initialize_libcamera(kwargs)
        elif self.source_type == "libcamera":
            self._initialize_libcamera(kwargs)
        elif self.source_type == "picamera":
            self._initialize_picamera(kwargs)
        else:
            # Fall back to OpenCV for any other source type (e.g. "webcam").
            self._initialize_opencv(kwargs)

    def _initialize_libcamera(self, kwargs):
        """Initialize using libcamera (pylibcamera); falls back to picamera2."""
        try:
            import pylibcamera
            from pylibcamera.capture import LibcameraStill
            from pylibcamera import formats

            self.libcam = LibcameraStill(
                config={
                    'width': self.resolution[0],
                    'height': self.resolution[1],
                    'format': formats.RGB888,
                    'buffer_count': 2,  # Double buffering for smooth capture
                }
            )

            # Start the camera.
            self.libcam.start()

            # Get the initial frame (libcamera delivers RGB; convert to BGR).
            raw_frame = self.libcam.capture_array()
            self.frame = cv2.cvtColor(raw_frame, cv2.COLOR_RGB2BGR)
            self.grabbed = True

            pub.sendMessage('log', msg="[VideoStream] libcamera initialized successfully")
        except ImportError as e:
            pub.sendMessage('log', msg=f"[VideoStream] pylibcamera direct usage failed: {str(e)}, falling back to picamera2")
            # Fall back to picamera2 which also uses libcamera.
            self.source_type = "picamera"
            self._initialize_picamera(kwargs)

    def _initialize_picamera(self, kwargs):
        """Initialize using picamera2; falls back to OpenCV on any error."""
        try:
            from picamera2 import Picamera2

            self.picam2 = Picamera2()

            # Use a simpler configuration to avoid control errors.
            config = self.picam2.create_preview_configuration(
                main={"size": self.resolution}
            )

            self.picam2.configure(config)
            self.picam2.start()

            # Get the initial frame; picamera2 returns RGB by default.
            self.frame = self.picam2.capture_array()
            if len(self.frame.shape) > 2 and self.frame.shape[2] == 3:
                self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
            self.grabbed = True

            pub.sendMessage('log', msg="[VideoStream] PiCamera2 initialized successfully")
        except Exception as e:
            pub.sendMessage('log', msg=f"[VideoStream] PiCamera2 module error: {str(e)}, falling back to OpenCV")
            # Try using OpenCV with V4L2 directly instead of GStreamer.
            self.source_type = "opencv"
            self._initialize_opencv(kwargs)

    def _initialize_opencv(self, kwargs):
        """Initialize using OpenCV (last-resort backend)."""
        # Try different camera devices and APIs on Raspberry Pi.
        if 'arm' in platform.machine():
            camera_devices = [
                ('/dev/video0', cv2.CAP_V4L2),
                ('/dev/video0', cv2.CAP_ANY),
                (0, cv2.CAP_V4L2),
                (0, cv2.CAP_ANY)
            ]

            for device, api in camera_devices:
                pub.sendMessage('log', msg=f"[VideoStream] Trying camera device: {device} with API: {api}")
                try:
                    self.stream = cv2.VideoCapture(device, api)
                    if self.stream.isOpened():
                        self.grabbed, frame = self.stream.read()
                        if self.grabbed and frame is not None:
                            self.frame = frame
                            pub.sendMessage('log', msg=f"[VideoStream] Successfully opened camera with device: {device}, API: {api}")
                            break
                        else:
                            self.stream.release()
                except Exception as e:
                    pub.sendMessage('log', msg=f"[VideoStream] Failed with device {device}: {str(e)}")
        else:
            # Non-ARM host: open the configured camera index directly.
            camera_id = kwargs.get('camera_id', 0)
            self.stream = cv2.VideoCapture(camera_id)
            if self.stream.isOpened():
                self.grabbed, frame = self.stream.read()
                if self.grabbed and frame is not None:
                    self.frame = frame

        # Set resolution for OpenCV cameras.
        if hasattr(self, 'stream') and self.stream.isOpened():
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.resolution[1])

    def update_pipeline(self, pipeline):
        """Update the camera pipeline with a new one (for dynamic updates)"""
        if self.source_type != "gstreamer":
            pub.sendMessage('log', msg=f"[VideoStream] Cannot update pipeline for {self.source_type} camera")
            return

        pub.sendMessage('log', msg=f"[VideoStream] Updating GStreamer pipeline to: {pipeline}")

        # Stop the current stream first.
        if hasattr(self, 'stream') and self.stream is not None:
            self.stream.release()

        # Initialize a new stream with the given pipeline.
        self.stream = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)

        if self.stream.isOpened():
            self.grabbed, frame = self.stream.read()
            if self.grabbed and frame is not None:
                self.frame = frame
                pub.sendMessage('log', msg="[VideoStream] Pipeline updated successfully")
            else:
                pub.sendMessage('log', msg="[VideoStream] Failed to read frame from updated pipeline")
        else:
            pub.sendMessage('log', msg="[VideoStream] Failed to open updated pipeline")

    def start(self):
        """Spawn the capture thread and return self (fluent style)."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Worker loop: keep grabbing frames until stop() is called."""
        while True:
            if self.stopped:
                self._release_resources()
                return

            try:
                if self.source_type == "libcamera" and hasattr(self, 'libcam'):
                    # Capture using direct libcamera (RGB -> BGR).
                    raw_frame = self.libcam.capture_array()
                    self.frame = cv2.cvtColor(raw_frame, cv2.COLOR_RGB2BGR)
                    self.grabbed = True
                elif self.source_type == "picamera" and hasattr(self, 'picam2'):
                    self.frame = self.picam2.capture_array()
                    # Convert to BGR if needed (picamera2 returns RGB by default).
                    if len(self.frame.shape) > 2 and self.frame.shape[2] == 3:
                        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
                    self.grabbed = True
                else:
                    # NOTE(review): on read failure this overwrites self.frame
                    # with None without locking, so a concurrent read() can
                    # briefly observe None - confirm callers tolerate that.
                    self.grabbed, self.frame = self.stream.read()

                # If frame reading failed, wait a moment and try again.
                if not self.grabbed or self.frame is None:
                    time.sleep(0.1)
            except Exception as e:
                pub.sendMessage('log', msg=f"[VideoStream] Error capturing frame: {str(e)}")
                time.sleep(0.5)  # Wait before trying again

    def read(self):
        """Return the most recent frame (may be the placeholder image)."""
        return self.frame

    def stop(self):
        """Signal the capture loop to exit; resources are freed by update()."""
        self.stopped = True

    def _release_resources(self):
        """Best-effort release of whichever backend was initialized."""
        if self.source_type == "libcamera" and hasattr(self, 'libcam'):
            try:
                self.libcam.stop()
            except:
                pass
        elif self.source_type == "picamera" and hasattr(self, 'picam2'):
            try:
                self.picam2.stop()
            except:
                pass
        elif hasattr(self, 'stream'):
            try:
                self.stream.release()
            except:
                pass
diff --git a/modules/vision/remote/vision.py b/modules/vision/remote/vision.py
new file mode 100644
index 00000000..61b51d80
--- /dev/null
+++ b/modules/vision/remote/vision.py
@@ -0,0 +1,293 @@
+import cv2
+from datetime import datetime
+from imutils.video import FPS
+from pubsub import pub
+import platform
+import os
+
+try:
+ from modules.vision.remote.faces import Faces
+except ModuleNotFoundError as e:
+ from faces import Faces
+from modules.vision.remote.video_stream import VideoStream
+
+class RemoteVision:
+ MODE_MOTION = 0
+ MODE_FACES = 1
+
def __init__(self, **kwargs):
    """Set up the vision source.

    On a Raspberry Pi ('arm' machine) the camera is opened locally for
    streaming; on any other host we connect to the robot's remote MJPEG
    stream instead.

    Recognised kwargs: enable_gstreamer, resolution, camera_type,
    gst_pipeline, force_libcamera, robot_ip, remote_source,
    command_endpoint, mode, path, flip, rotate, preview, accuracy,
    laptop_processing, priority_persons.
    """
    # Configure environment variables for camera backends.
    if 'arm' in platform.machine():
        os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0"
        os.environ["OPENCV_VIDEOIO_PRIORITY_V4L"] = "1"  # Use V4L directly
        os.environ["OPENCV_VIDEOIO_PRIORITY_V4L2"] = "1"

        # Enable GStreamer - important for libcamerasrc.
        use_gstreamer = kwargs.get('enable_gstreamer', True)
        if use_gstreamer:
            os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "1"
            pub.sendMessage('log', msg="[RemoteVision] GStreamer support enabled")
        else:
            os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "0"
            pub.sendMessage('log', msg="[RemoteVision] GStreamer support disabled")

    self.is_raspberry_pi = 'arm' in platform.machine()
    self.dimensions = kwargs.get('resolution', (640, 480))

    # For Raspberry Pi: just capture and stream, don't process.
    if self.is_raspberry_pi:
        # Only set up the camera source here - streaming is handled by camera_streamer.
        pub.sendMessage('log', msg="[RemoteVision] Setting up camera on Raspberry Pi for streaming")

        # Use a more robust method to detect available camera types.
        camera_type = kwargs.get('camera_type', 'auto')

        # Check if a custom GStreamer pipeline is provided.
        gst_pipeline = kwargs.get('gst_pipeline', None)
        if gst_pipeline:
            camera_type = 'gstreamer'
            pub.sendMessage('log', msg=f"[RemoteVision] Using custom GStreamer pipeline: {gst_pipeline}")

        if camera_type == 'auto':
            # First try GStreamer if available (probe with videotestsrc).
            try:
                test_pipeline = "videotestsrc num-buffers=1 ! appsink"
                test_cap = cv2.VideoCapture(test_pipeline, cv2.CAP_GSTREAMER)
                if test_cap.isOpened():
                    ret = test_cap.read()
                    test_cap.release()
                    if ret[0]:
                        camera_type = 'gstreamer'
                        # Create a default libcamerasrc pipeline if none provided.
                        if not gst_pipeline:
                            # Use the confirmed working pipeline with BGR format.
                            gst_pipeline = f"libcamerasrc ! video/x-raw, format=BGR, width={self.dimensions[0]}, height={self.dimensions[1]}, framerate=30/1 ! videoconvert ! appsink"
                            pub.sendMessage('log', msg=f"[RemoteVision] Using default GStreamer pipeline: {gst_pipeline}")
            except Exception as e:
                pub.sendMessage('log', msg=f"[RemoteVision] GStreamer test failed: {str(e)}")
                # Try picamera next.
                try:
                    from picamera2 import Picamera2
                    picam = Picamera2()
                    # Just verify it can be imported and instantiated.
                    camera_type = 'picamera'
                    del picam
                except ImportError:
                    pub.sendMessage('log', msg="[RemoteVision] PiCamera2 not available, falling back to OpenCV")
                    camera_type = 'opencv'
                except Exception as e:
                    pub.sendMessage('log', msg=f"[RemoteVision] PiCamera2 error: {str(e)}, falling back to OpenCV")
                    camera_type = 'opencv'

        # Only try libcamera if explicitly requested in config.
        if kwargs.get('force_libcamera', False):
            try:
                import libcamera
                camera_type = 'libcamera'
            except ImportError:
                pub.sendMessage('log', msg="[RemoteVision] Libcamera not available")

        pub.sendMessage('log', msg=f"[RemoteVision] Using {camera_type} on RPi")

        # Pass additional parameters to VideoStream.
        video_params = kwargs.copy()
        if gst_pipeline:
            video_params['gst_pipeline'] = gst_pipeline

        # Subscribe to camera pipeline updates from test tools.
        pub.subscribe(self.update_camera_pipeline, 'camera:set_pipeline')

        # NOTE(review): video_params is a copy of kwargs, so a caller-supplied
        # 'resolution' would reach VideoStream twice (explicitly and via
        # **video_params) and raise TypeError - confirm callers never pass it.
        self.video = VideoStream(
            source_type=camera_type,
            resolution=self.dimensions,
            **video_params
        ).start()
    else:
        # UPDATED: Use the correct URL format for MJPEG streaming.
        self.remote_source = kwargs.get('remote_source', f'http://{kwargs.get("robot_ip", "192.168.137.52")}:8000/stream.mjpg')
        self.command_endpoint = kwargs.get('command_endpoint', f'http://{kwargs.get("robot_ip", "192.168.137.52")}:8090')
        pub.sendMessage('log', msg=f"[RemoteVision] Using command endpoint: {self.command_endpoint}")
        pub.sendMessage('log', msg=f"[RemoteVision] Connecting to remote stream: {self.remote_source}")
        self.video = VideoStream(source_type="remote", remote_source=self.remote_source, **kwargs).start()

    self.mode = kwargs.get('mode', RemoteVision.MODE_MOTION)
    self.path = kwargs.get('path', '/')
    self.flip = kwargs.get('flip', False)
    self.rotate = kwargs.get('rotate', False)
    self.preview = kwargs.get('preview', False)
    self.accuracy = kwargs.get('accuracy', 10)  # minNeighbors for the Haar cascade
    self.static_back = None  # reference frame for motion detection
    self.fps = FPS().start()
    pub.subscribe(self.exit, "exit")

    # Parameters for laptop-based processing.
    self.laptop_processing = kwargs.get('laptop_processing', False)
    self.priority_persons = kwargs.get('priority_persons', [])

    # Track which persons were seen and their priority status.
    self.detected_persons = {}
    self.last_detection_time = {}

    if self.mode == RemoteVision.MODE_FACES:
        try:
            self.cascade_path = self.path + "/modules/vision/haarcascade_frontalface_default.xml"
            self.cascade = cv2.CascadeClassifier(self.cascade_path)
            self.faces = Faces(detector=self.cascade, path=self.path)
        except Exception as e:
            pub.sendMessage('log:error', msg=f"[RemoteVision] Error initializing face detection: {str(e)}")
            # Fall back to motion detection.
            self.mode = RemoteVision.MODE_MOTION

    self.running = True
    pub.sendMessage('log', msg=f"[RemoteVision] Initialized with {self.dimensions[0]}x{self.dimensions[1]} resolution")
+
def update_camera_pipeline(self, pipeline):
    """Forward a new pipeline string to the video stream, if supported."""
    video = getattr(self, 'video', None)
    if video is not None and hasattr(video, 'update_pipeline'):
        pub.sendMessage('log', msg=f"[RemoteVision] Updating camera pipeline to: {pipeline}")
        video.update_pipeline(pipeline)
    else:
        pub.sendMessage('log', msg="[RemoteVision] Unable to update camera pipeline - incompatible video stream")
+
+ def exit(self):
+ self.running = False
+ if self.video and hasattr(self.video, "stop"):
+ self.video.stop()
+ cv2.destroyAllWindows()
+ self.fps.stop()
+ pub.sendMessage("log", msg="[RemoteVision] Approx. FPS: {:.2f}".format(self.fps.fps()))
+
+ def detect(self):
+ if not self.running:
+ return
+ self.fps.update()
+ frame = self.video.read()
+ if frame is None or frame.size == 0:
+ pub.sendMessage('log', msg="[RemoteVision] Stream ends prematurely (no frame); attempting to reconnect...")
+ try:
+ self.video.stop()
+ except Exception:
+ pass
+ import time
+ time.sleep(1)
+ self.video = VideoStream(source_type="remote", remote_source=self.remote_source, **{'resolution': self.dimensions}).start()
+ return
+
+ try:
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+ except Exception as e:
+ pub.sendMessage('log', msg=f"[RemoteVision] Error converting frame: {str(e)}")
+ return
+
+ matches = []
+ names = []
+
+ if self.mode == RemoteVision.MODE_FACES:
+ try:
+ matches = self.cascade.detectMultiScale(
+ gray,
+ scaleFactor=1.1,
+ minNeighbors=self.accuracy,
+ minSize=(30, 30)
+ )
+ if len(matches) > 0:
+ cnt = 0
+ for (x, y, w, h) in matches:
+ cropped = frame[y:y+h, x:x+w]
+ cnt += 1
+ names += self.faces.detect(cropped, [(0, 0, w, h)], cnt == len(matches))
+ except Exception as e:
+ pub.sendMessage('log', msg=f"[RemoteVision] Error in face detection: {str(e)}")
+
+ elif self.mode == RemoteVision.MODE_MOTION:
+ try:
+ gray_blur = cv2.GaussianBlur(gray, (21, 21), 0)
+ if self.static_back is None:
+ self.static_back = gray_blur
+ return
+ diff_frame = cv2.absdiff(self.static_back, gray_blur)
+ thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
+ thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
+ contours, _ = cv2.findContours(thresh_frame.copy(),
+ cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+ for contour in contours:
+ if cv2.contourArea(contour) < 10000:
+ continue
+ (x, y, w, h) = cv2.boundingRect(contour)
+ matches.append((x, y, w, h))
+ except Exception as e:
+ pub.sendMessage('log', msg=f"[RemoteVision] Error in motion detection: {str(e)}")
+
+ if self.preview and frame is not None:
+ self.render(frame, matches, names)
+ return matches
+
+ def render(self, frame, matches, names):
+ index = 0
+ for (x, y, w, h) in matches:
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
+ if names and index < len(names):
+ label_y = y - 15 if y - 15 > 15 else y + 15
+ cv2.putText(frame, names[index], (x, label_y),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
+ index += 1
+ try:
+ cv2.imshow("Remote Preview", frame)
+ cv2.waitKey(1)
+ except Exception as e:
+ pub.sendMessage('log', msg=f"[RemoteVision] Error displaying preview: {str(e)}")
+
+ def get_area(self, match):
+ if match is not None:
+ x, y, w, h = match
+ return float(w) * float(h)
+ return 0
+
+ def process_face_data(self, face_data):
+ """Process face detection data from laptop
+
+ This method is called when the laptop sends face recognition data
+ through the command channel, letting the robot know about detected faces
+ without doing local processing.
+ """
+ if not self.laptop_processing:
+ return
+
+ try:
+ count = face_data.get('count', 0)
+ names = face_data.get('names', [])
+ priority_names = face_data.get('priority_names', [])
+
+ now = time.time()
+
+ # Update detection tracking
+ for name in names:
+ self.detected_persons[name] = now
+
+ # Process highest priority person for tracking
+ if priority_names:
+ # If multiple priority persons, choose the one with highest priority
+ highest_priority = priority_names[0] # Default to first
+
+ # Publish tracking info for the robot's servos
+ pub.sendMessage('priority_person:detected', name=highest_priority)
+
+ # Log the detection
+ pub.sendMessage('log', msg=f"[RemoteVision] Detected priority person: {highest_priority}")
+
+ elif names:
+ # If no priority persons but regular persons detected
+ pub.sendMessage('log', msg=f"[RemoteVision] Detected {count} persons: {', '.join(names)}")
+
+ # Clean up old detections (older than 5 seconds)
+ for name in list(self.detected_persons.keys()):
+ if now - self.detected_persons[name] > 5:
+ del self.detected_persons[name]
+
+ except Exception as e:
+ pub.sendMessage('log:error', msg=f"[RemoteVision] Error processing face data: {e}")
+
+ def set_priority_persons(self, persons):
+ """Update the list of priority persons"""
+ self.priority_persons = persons
+ pub.sendMessage('log', msg=f"[RemoteVision] Updated {len(persons)} priority persons")
diff --git a/tests/test_sensor.py b/tests/test_sensor.py
new file mode 100644
index 00000000..8a2ea654
--- /dev/null
+++ b/tests/test_sensor.py
@@ -0,0 +1,33 @@
+import unittest
+from unittest.mock import patch, MagicMock
+
+# Mock gpiozero and pubsub libraries
+import sys
+sys.modules['gpiozero'] = MagicMock()
+sys.modules['pubsub'] = MagicMock()
+sys.modules['pubsub.pub'] = MagicMock()
+
+from modules.sensor import Sensor
+
class TestSensor(unittest.TestCase):
    """Unit tests for modules.sensor.Sensor with gpiozero mocked out.

    Fix over the previous version: setUp no longer constructs a Sensor
    without patching MotionSensor — that instance was never used (each test
    builds its own patched sensor), so the unpatched construction was
    redundant.
    """

    def setUp(self):
        # GPIO pin number shared by all tests.
        self.pin = 4

    @patch('modules.sensor.MotionSensor')
    def test_init(self, MockMotionSensor):
        """Sensor stores its pin and wraps the (mocked) MotionSensor instance."""
        sensor_instance = MockMotionSensor.return_value
        sensor = Sensor(pin=self.pin)
        self.assertEqual(sensor.pin, self.pin)
        self.assertEqual(sensor.sensor, sensor_instance)

    @patch('modules.sensor.MotionSensor')
    def test_read(self, MockMotionSensor):
        """read() is truthy when motion is detected, and .value reflects it."""
        sensor_instance = MockMotionSensor.return_value
        sensor_instance.motion_detected = True
        sensor = Sensor(pin=self.pin)
        self.assertTrue(sensor.read())
        self.assertTrue(sensor.value)
+
# Allow running this test module directly (python tests/test_sensor.py).
if __name__ == '__main__':
    unittest.main()
\ No newline at end of file