diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..a2ca3645 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,38 @@ + + +## What type of change is this? +- [ ] New module +- [ ] Change to an existing module +- [ ] Core improvement +- [ ] Other (please describe) + +## 📝 What does this change do? + + +## ❓ Why is this change needed? + + +## 🛠️ How was this implemented? + + +## 🧪 How was this tested? + + +## 💥 Breaking changes + +- [ ] Yes (please describe) +- [ ] No + +## 🗂 Related issues + + +## ✅ PR Checklist +- [ ] **Title & Description:** PR title and description are clear and complete. +- [ ] **Documentation:** PR links to idea in github discussion group containing complete documentation. +- [ ] **Scope & Size:** PR is focused on a single issue/feature and is a reasonable size. +- [ ] **Code Quality:** Code is clean, consistent, and follows the project style guide. +- [ ] **Tests:** Tests have been added/updated if needed. +- [ ] **Manual Testing:** Changes have been tested on the latest release of the project. +- [ ] **Self-Review:** I’ve reviewed my own code and ensured there are no obvious issues. + +🚀 Thank you for your contribution to the project! diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..379ff1c6 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,31 @@ +name: Python Unittest on PR + +on: + pull_request: + types: [opened, synchronize, reopened] + +jobs: + run-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + + - name: Run unittests + run: | + python -m unittest discover -s tests -p 'test_*.py' + + permissions: + pull-requests: write + contents: read \ No newline at end of file diff --git a/README.md b/README.md index 78926236..10f60fb9 100644 --- a/README.md +++ b/README.md @@ -1,190 +1,43 @@ -[![Companion Robot](https://circleci.com/gh/danic85/companion-robot.svg?style=shield)](https://app.circleci.com/pipelines/github/danic85/companion-robot) - -# Robotics Development Framework -This platform has been created to allow modular development and experimentation of robotics in python / C++ using the Raspberry Pi and Arduino. - -## Coral TPU Accelerator - -To use the Googla Coral USB Accelerator, first flash the Pi SD card with the image found in the [AIY Maker Kit](https://aiyprojects.withgoogle.com/maker/) -([Download as of 2022-08-05](https://github.com/google-coral/aiy-maker-kit-tools/releases/download/v20220518/aiy-maker-kit-2022-05-18.img.xz)) - -(I attempted to install the required software from the coral [getting started guide](https://coral.ai/docs/accelerator/get-started#1-install-the-edge-tpu-runtime) but I was unable to get past an error relating to grpico "GLIBC_2.29' not found") - -Alternatively, set Config.vision.tech to `opencv` for the original (slower) facial recognition. I am not updating this anymore so you may find some integration issues. 
- -## Installation -``` -chmod 777 install.sh -./install.sh -``` - -Disable audio (see Neopixels section below) - -## Running -``` -./startup.sh -``` -To execute manual control via keyboard: -``` -./manual_startup.sh -``` -To execute startup including a preview of the video feed (not available via SSH): -``` -./preview_startup.sh -``` - -###Testing -``` -python3 -m pytest --cov=modules --cov-report term-missing -``` - -## Run on Startup - -Execute `sudo vim /etc/rc/local` and add the following lines before the `exit 0` statement: -``` -python3 /home/archie/companion-robot/shutdown_pi.py -/home/archie/companion-robot/startup.sh -``` - -### Auto shutdown -GPIO 26 is wired to allow shutdown when brought to ground via a switch. - -The script `shutdown_pi.py` manages this. - -Guide: -https://howchoo.com/g/mwnlytk3zmm/how-to-add-a-power-button-to-your-raspberry-pi - -## Features - -### Facial detection and tracking -Using the Raspberry Pi camera - -### Servo control -Control of up to 9 servos via an arduino serial connection - -### Battery monitor -Both external and software integrated via the arduino serial connection - -### Buzzer -A buzzer is connected to GPIO 27 to allow for tones to be played in absence of audio output (see Neopixel below). -https://github.com/gumslone/raspi_buzzer_player.git - -### Motion Sensor -An RCWL-0516 microwave radar sensor is equipped on GPIO 13 - -### Stereo MEMS Mics -GPIO 18, 19 and 20 allow stereo MEMS microphones as audio input -``` -Mic 3V to Pi 3.3V -Mic GND to Pi GND -Mic SEL to Pi GND (this is used for channel selection, connect to either 3.3V or GND) -Mic BCLK to BCM 18 (pin 12) -Mic DOUT to BCM 20 (pin 38) -Mic LRCL to BCM 19 (pin 35) -``` -https://learn.adafruit.com/adafruit-i2s-mems-microphone-breakout/raspberry-pi-wiring-test - - -``` -cd ~ -sudo pip3 install --upgrade adafruit-python-shell -wget https://raw.githubusercontent.com/adafruit/Raspberry-Pi-Installer-Scripts/master/i2smic.py -sudo python3 i2smic.py -``` - -####Test -`arecord -l` -`arecord -D plughw:0 -c2 -r 48000 -f S32_LE -t wav -V stereo -v file_stereo.wav` - -_Note:_ See below for additional configuration to support voice recognition - -### Speech Recognition -Trigger word for voice recognition (currently not used): -https://snowboy.kitt.ai/ - -Speech recognition is enabled whenever a face is visible. -Ensure that the `device_index` specified in `modules/speechinput.py` matches your microphone. - -See `scripts/speech.py` to list input devices and test. See below for MEMS microphone configuration - -### MEMS Microphone configuration for speech recognition - -By default the Adafruit I2S MEMS Microphone Breakout does not work with speech recognition. - -To support voice recognition on the MEMS microphone(s) the following configuration changes are needed. - -`sudo apt-get install ladspa-sdk` - -Create `/etc/asound.conf` with the following content: - -``` -pcm.pluglp { - type ladspa - slave.pcm "plughw:0" - path "/usr/lib/ladspa" - capture_plugins [ - { - label hpf - id 1042 - } - { - label amp_mono - id 1048 - input { - controls [ 30 ] - } - } - ] -} - -pcm.lp { - type plug - slave.pcm pluglp -} -``` - -This enables the device 'lp' to be referenced in voice recognition. Shown with index `18` in the example below. 
- -Sample rate should also be set to `16000` - -`mic = sr.Microphone(device_index=18, sample_rate=16000)` - -References: - -* [MEMS Microphone Installation Guide](https://learn.adafruit.com/adafruit-i2s-mems-microphone-breakout/raspberry-pi-wiring-test) - -* [Adafruit Support discussing issue](https://forums.adafruit.com/viewtopic.php?f=50&t=181675&p=883853&hilit=MEMS#p883853) - -* [Referenced documentation of fix](https://github.com/mpromonet/v4l2rtspserver/issues/94) - -### Serial communication with Arduino - -In order to use the Raspberry Pi’s serial port, we need to disable getty (the program that displays login screen) - -`sudo raspi-config -> Interfacing Options -> Serial -> "Would you like a login shell to be accessible over serial" = 'No'. Restart` - -#### Connection via serial pins -Connect the Pi GPIO 14 & 15 (tx & rx) to the arduino tx & rx (tx -> rx in both directions!) via a logic level shifter, as the Pi is 3v3 and the arduino is (most likely) 5v. - -####Upload to Arduino over serial pins -To upload over serial pins, press the reset button on the Arduino at the point that the IDE starts 'uploading' (after compile), otherwise a sync error will display. - -### Neopixel - -WS1820B support is included via the Pi GPIO pin 12. Unfortunately to support this you must disable audio on the Pi. - -``` -sudo vim /boot/config.txt -#dtparam=audio=on -``` - -This is also why the application must be executed with `sudo` - -https://learn.adafruit.com/neopixels-on-raspberry-pi/python-usage - -## PCBs -Prefabricated PCBs are available for this project in the `circuits` folder. This allows the connection between the core components as described above. - -![Top](circuits/v2/Upper/Top%20Feb%202021_pcb.png) - -![Bottom](circuits/v2/Lower/Lower%20Feb%202021_pcb.png) +# Open Source, 3D Printable, Modular Bipedal Robot Project + +The **Modular Bipedal Robot** project aims to educate and inspire individuals interested in robotics and electronics. This open-source initiative focuses on creating a fully autonomous companion robot with a variety of advanced features. + +## Key Features + +- **Bipedal Design**: The robot includes articulated legs for bipedal movement. +- **Control Systems**: Utilizes Arduino and Raspberry Pi, managed through custom PCBs. +- **Modular Body**: Configurable body components allow for easy customization and adaptability. +- **Software Modules**: + - Animation: Handles the animation of the robot, including walking, turning, and other movements. + - Braillespeak: Converts text to Braille and speaks it using a proprietary audio output using the onboard buzzer. + - Buzzer: Controls the buzzer for audio output. Includes the ability to play tones and melodies. + - ChatGPT: Uses the OpenAI GPT models to process text based on user input. + - Logging: Logs data to a file for debugging and analysis. + - Motion Detection: Handles motion detection using an onboard microwave motion sensor. + - Neopixel: Controls the onboard Neopixel LEDs for visual feedback. + - PiServo: Controls the servos connected to the Raspberry Pi. + - PiTemperature: Reads the temperature from the integrated temperature sensor on the Raspberry Pi. + - RTLSDR: Uses an RTL-SDR dongle to receive and process radio signals. + - Serial Connection: Handles serial communication between the Raspberry Pi and Arduino. + - Servos: Controls the servos connected to the Arduino via the Raspberry Pi and the serial connection. + - Tracking: Uses computer vision to track objects and faces using the onboard camera. 
+ - Translator: Translates text between languages using the Google Translate API.
+ - TTS: Converts text to speech using the onboard speaker.
+ - Viam: Uses the Viam API to integrate Viam modules for additional functionality.
+ - Vision: Handles image processing and computer vision tasks using the onboard IMX500 Raspberry Pi AI camera.
+ - [Read more](https://github.com/makerforgetech/modular-biped/wiki/Software#modules)!
+
+## Project Background
+
+The Modular Biped Robot Project provides a flexible, modular framework for robotics development in Python and C++ on the Raspberry Pi and Arduino platforms. It aims to enable developers, robotics enthusiasts, and curious individuals to experiment, create, and customize their own biped robots. With a wide range of built-in features and the option to easily add your own, the project offers an exciting opportunity to explore the world of robotics.
+
+## Modularity
+
+The open source framework is designed for flexibility, allowing users to easily add or remove components to suit their specific needs. Comprehensive [guides](https://github.com/makerforgetech/modular-biped/wiki/Software#creating-a-module) are provided for integrating new modules seamlessly.
+
+## Resources
+
+- **Documentation**: For detailed information, visit the project’s GitHub wiki: [Modular Biped Documentation](https://github.com/makerforgetech/modular-biped/wiki)
+- **Code**: Check out the modular open source software on [GitHub](https://github.com/makerforgetech/modular-biped)
+- **YouTube Playlist**: Explore the development process through our build videos: [Watch on YouTube](https://www.youtube.com/watch?v=2DVJ5xxAuWY&list=PL_ua9QbuRTv6Kh8hiEXXVqywS8pklZraT)
+- **Community**: Have a question or want to show off your build? Join the communities on [GitHub](https://bit.ly/maker-forge-community) and [Discord](https://bit.ly/makerforge-community)!
diff --git a/Software Architecture.drawio.svg b/Software Architecture.drawio.svg
new file mode 100644
index 00000000..04b92b57
--- /dev/null
+++ b/Software Architecture.drawio.svg
@@ -0,0 +1,4 @@
[SVG content omitted — software architecture diagram. Each module is described by a Config YAML (arguments, Python dependencies, Unix dependencies, additional instructions) paired with a Python module that is constructed via __init__(**kwargs) and communicates and logs via pubsub. main.py and module_loader.py load the modules; install.sh, startup.sh and stop.sh manage install, startup and shutdown.]
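The diagram describes the pattern each module follows: a YAML file declares the module, its dependencies, and its constructor arguments, and `module_loader.py` instantiates the matching Python class with those arguments. The sketch below illustrates that pattern; it is a minimal reconstruction based on the diagram and the config files in this PR (e.g. `config/neopixel.yml`), not the project's actual loader code, and the `load_module` helper name is hypothetical.

```python
import importlib
import yaml  # pyyaml, installed by install.sh in this PR


def load_module(config_path):
    """Instantiate a module from its YAML definition (illustrative sketch)."""
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    # Each file holds one top-level key, e.g. 'neopixel'
    name, definition = next(iter(cfg.items()))
    if not definition.get('enabled', False):
        return None  # disabled modules are skipped
    # 'path' points at the class, e.g. 'modules.neopixel.neopx.NeoPx'
    module_path, class_name = definition['path'].rsplit('.', 1)
    cls = getattr(importlib.import_module(module_path), class_name)
    # The 'config' mapping becomes the **kwargs shown in the diagram
    return cls(**definition.get('config', {}))
```

Once constructed, modules talk to each other only through pubsub topics (`pub.sendMessage` / `pub.subscribe`), which is what keeps them swappable.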
diff --git a/circuits/v4/Head/v4head/v4head-backups/v4head-2024-11-08_135158.zip b/circuits/v4/Head/v4head/v4head-backups/v4head-2024-11-08_135158.zip
new file mode 100644
index 00000000..d4e65817
Binary files /dev/null and b/circuits/v4/Head/v4head/v4head-backups/v4head-2024-11-08_135158.zip differ
diff --git a/config/neopixel.yml b/config/neopixel.yml
index 53c49d7a..fcd3c094 100644
--- a/config/neopixel.yml
+++ b/config/neopixel.yml
@@ -2,8 +2,8 @@ neopixel:
   enabled: true
   path: 'modules.neopixel.neopx.NeoPx'
   config:
-    pin: None # GPIO2 and 3 are use for i2c
-    i2c: True
+    pin: 12 # Only used for the GPIO protocol (GPIO12 by default). GPIO2 and 3 are used for I2C, GPIO10 for SPI
+    protocol: 'I2C' # choose between GPIO, I2C and SPI
     count: 12
     positions: {
       'status1': 0,
@@ -36,6 +36,8 @@ neopixel:
   dependencies:
     python:
       - pypubsub
-      - adafruit-circuitpython-seesaw
+      - adafruit-circuitpython-seesaw # I2C support
+      - adafruit-blinka # SPI support
+      - adafruit-circuitpython-neopixel-spi # SPI support
   emotion_analysis:
     enable: false
diff --git a/config/vision.yml b/config/vision.yml
index 5c479ac4..2bb6cf8d 100644
--- a/config/vision.yml
+++ b/config/vision.yml
@@ -7,5 +7,4 @@ vision:
     unix:
       - imx500-all
     python:
-      - python3-opencv
-      - python3-munkres
+      - opencv-python
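The new `protocol` key in `config/neopixel.yml` selects how the NeoPixel chain is driven. A rough sketch of how the three options might map onto the Adafruit libraries listed under `dependencies` follows; the exact wiring inside `NeoPx` may differ, and the pin handling here is an assumption.

```python
import board


def make_pixels(protocol: str, pin: int, count: int):
    """Illustrative initialisation for the three protocol options."""
    if protocol == 'GPIO':
        # Direct drive from a GPIO pin (GPIO12 in the config above)
        import neopixel  # adafruit-circuitpython-neopixel
        return neopixel.NeoPixel(getattr(board, f'D{pin}'), count)
    if protocol == 'SPI':
        # Driven from the SPI MOSI line (GPIO10)
        import neopixel_spi  # adafruit-circuitpython-neopixel-spi
        return neopixel_spi.NeoPixel_SPI(board.SPI(), count)
    if protocol == 'I2C':
        # Via a seesaw breakout on the I2C bus (GPIO2/3); 'pin' here is the
        # pin on the seesaw board, not on the Pi
        from adafruit_seesaw.seesaw import Seesaw  # adafruit-circuitpython-seesaw
        from adafruit_seesaw.neopixel import NeoPixel as SeesawNeoPixel
        return SeesawNeoPixel(Seesaw(board.I2C()), pin, count)
    raise ValueError(f'Unknown protocol: {protocol}')
```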
+ install_service + echo + echo "To disable this service, run:" + echo " $0 disable" + ;; + *) + echo "Usage: $0 {enable|disable}" + exit 1 + ;; +esac diff --git a/main.py b/main.py index 31e509d6..4c32379b 100644 --- a/main.py +++ b/main.py @@ -2,7 +2,7 @@ import logging from time import sleep, time import signal -import schedule +# import schedule from pubsub import pub from modules.config import Config from module_loader import ModuleLoader @@ -88,7 +88,7 @@ def main(): if time() - minute_loop > 60: minute_loop = time() pub.sendMessage('loop:60') - schedule.run_pending() + # schedule.run_pending() except Exception as ex: logging.error(f"Exception: {ex}", exc_info=True) diff --git a/modules/SerialShareManager.py b/modules/SerialShareManager.py new file mode 100644 index 00000000..5fe91939 --- /dev/null +++ b/modules/SerialShareManager.py @@ -0,0 +1,339 @@ +import threading +import time +from pubsub import pub +import traceback +import serial # Serial bağlantı için + +# --- MQTT aktifse bu modül devre dışı bırakılır --- +DISABLE_SERIAL_SHARE_MANAGER = False + +class SerialShareManager: + """ + Class to manage ESP32 serial connection sharing between modules + """ + _instance = None + _lock = threading.RLock() + + def __new__(cls): + """Singleton pattern to ensure only one instance exists""" + with cls._lock: + if cls._instance is None: + cls._instance = super(SerialShareManager, cls).__new__(cls) + cls._instance._serial = None + cls._instance._initialized = False + cls._instance._active_modules = {} + cls._instance._verbose_logging = False # Default olarak detaylı log kapalı + cls._instance._setup_pubsub() + # Log creation only in verbose mode + try: + if cls._instance._verbose_logging: + pub.sendMessage('log', msg=f'[SerialManager] Instance created') + except: + pass # Suppress if pub not ready + return cls._instance + + def _setup_pubsub(self): + """Set up pubsub handlers""" + try: + pub.subscribe(self._handle_serial_ready, 'esp32:serial_ready') + pub.subscribe(self._handle_module_connect, 'esp32:module_connect') + pub.subscribe(self._handle_module_disconnect, 'esp32:module_disconnect') + # Önemli olaylar için yeni abonelikler + pub.subscribe(self._handle_priority_animation, 'priority:person_detected') + except: + # May happen if pubsub not initialized yet + pass + + def _handle_serial_ready(self): + """Handle serial ready event from provider module""" + if self._verbose_logging: + pub.sendMessage('log', msg=f'[SerialManager] Received serial ready notification') + # Broadcast our own ready event + try: + threading.Timer(0.2, lambda: pub.sendMessage('esp32:serial_global_ready')).start() + except: + pass + + def _handle_module_connect(self, module_name): + """Track module connection to serial""" + with self._lock: + self._active_modules[module_name] = time.time() + if self._verbose_logging: + pub.sendMessage('log', msg=f'[SerialManager] Module {module_name} connected to serial') + + def _handle_module_disconnect(self, module_name): + """Track module disconnection from serial""" + with self._lock: + if module_name in self._active_modules: + del self._active_modules[module_name] + if self._verbose_logging: + pub.sendMessage('log', msg=f'[SerialManager] Module {module_name} disconnected from serial') + + def _handle_priority_animation(self, person_name, animation_name="RAINBOW_CYCLE"): + """Öncelikli kişi için animasyon tetikleyici""" + if self._serial and self._initialized: + try: + pub.sendMessage('log:info', msg=f'[SerialManager] Priority person detected: {person_name}, ensuring animation 
runs') + # LED animasyonunu doğrudan tetikle + animation_cmd = f"ANIMATE {animation_name.upper()} GREEN 3\n" + self._serial.write(animation_cmd.encode()) + + # Yedekleme olarak standart LED animasyon kanalını da tetikle + threading.Timer(0.5, lambda: pub.sendMessage('led:animate', animation=animation_name, color='GREEN', repeat=3)).start() + except Exception as e: + pub.sendMessage('log:error', msg=f'[SerialManager] Error sending priority animation: {e}') + + def register_serial_provider(self, serial, provider_name='neopixel'): + """Register the serial connection from a provider module""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + with self._lock: + old_serial = self._serial + self._serial = serial + self._initialized = True + self._active_modules[provider_name] = time.time() + + # Eski bağlantıyı kapat (varsa) + if old_serial and old_serial != serial: + try: + old_serial.close() + except: + pass + + try: + if self._verbose_logging: + pub.sendMessage('log', msg=f'[SerialManager] Serial registered by {provider_name}') + + # Bağlantıyı test et + self._verify_connection() + + # Notify others that serial is ready + threading.Timer(0.5, lambda: pub.sendMessage('esp32:serial_global_ready')).start() + except Exception as e: + pub.sendMessage('log:error', msg=f'[SerialManager] Error in register_serial_provider: {e}') + traceback.print_exc() + + def _verify_connection(self): + """Seri port bağlantısını doğrula""" + if not self._serial: + return False + + try: + # Veri akışını temizle + self._serial.reset_input_buffer() + self._serial.reset_output_buffer() + + # PING komutu gönder + self._serial.write(b"PING\n") + + # Yanıtı bekle (2 saniye timeout) + start_time = time.time() + response = "" + + while time.time() - start_time < 2.0: + if self._serial.in_waiting > 0: + line = self._serial.readline().decode().strip() + if "PONG" in line: + if self._verbose_logging: + pub.sendMessage('log:info', msg=f'[SerialManager] Connection verified (PONG received)') + return True + + # PONG alınamadı, tekrar dene + self._serial.write(b"PING\n") + start_time = time.time() + + while time.time() - start_time < 2.0: + if self._serial.in_waiting > 0: + line = self._serial.readline().decode().strip() + if "PONG" in line: + if self._verbose_logging: + pub.sendMessage('log:info', msg=f'[SerialManager] Connection verified on second attempt') + return True + + pub.sendMessage('log:warn', msg=f'[SerialManager] Connection verification failed (no PONG response)') + return False + + except Exception as e: + pub.sendMessage('log:error', msg=f'[SerialManager] Error verifying connection: {e}') + return False + + def get_serial(self, module_name): + """Get the shared serial connection for a module""" + if DISABLE_SERIAL_SHARE_MANAGER: + return None + with self._lock: + if self._serial is not None and self._initialized: + self._active_modules[module_name] = time.time() + if self._verbose_logging: + try: + pub.sendMessage('log', msg=f'[SerialManager] Providing serial to {module_name}') + except: + # Suppress errors if pub not ready + pass + return self._serial + else: + if self._verbose_logging: + try: + pub.sendMessage('log', msg=f'[SerialManager] Serial not available for {module_name}') + except: + # Suppress errors if pub not ready + pass + return None + + def send_command(self, command, module_name="unknown"): + """Direkt komut gönderme methodu""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + with self._lock: + if self._serial and self._initialized: + try: + # Komut bir satır sonu ile bitmiyorsa ekle + if not 
command.endswith('\n'): + command += '\n' + + # Komutu gönder + self._serial.write(command.encode()) + + # İşleme için kısa süre bekle + time.sleep(0.05) + + if self._verbose_logging: + pub.sendMessage('log:debug', msg=f'[SerialManager] Command sent from {module_name}: {command.strip()}') + + return True + except Exception as e: + pub.sendMessage('log:error', msg=f'[SerialManager] Error sending command from {module_name}: {e}') + else: + if self._verbose_logging: + pub.sendMessage('log:warn', msg=f'[SerialManager] Cannot send command: serial not initialized') + + return False + + def connect(self, serial_port, baudrate=115200, provider_name="auto_connect"): + """Seri port bağlantısını kur ve paylaş""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + with self._lock: + # Eğer halihazırda bir seri port bağlantısı varsa, kapat + if self._serial and self._serial.is_open: + try: + self._serial.close() + time.sleep(0.5) # Kapanma için bekle + except: + pass + + try: + # Bağlantı için 3 deneme yap + for attempt in range(3): + try: + serial_connection = serial.Serial( + serial_port, + baudrate, + timeout=1, + write_timeout=1 + ) + + # Veri akışını temizle + serial_connection.reset_input_buffer() + serial_connection.reset_output_buffer() + + # ESP32'nin hazır olması için bekle + time.sleep(1.5) + + # Bağlantıyı test et + serial_connection.write(b"PING\n") + time.sleep(0.5) + + response = "" + start_time = time.time() + while time.time() - start_time < 2.0: + if serial_connection.in_waiting > 0: + line = serial_connection.readline().decode().strip() + if "PONG" in line: + # Bağlantı başarılı + self._serial = serial_connection + self._initialized = True + self._active_modules[provider_name] = time.time() + + pub.sendMessage('log:info', msg=f'[SerialManager] Connected to {serial_port} at {baudrate} baud') + + # Haber ver + threading.Timer(0.5, lambda: pub.sendMessage('esp32:serial_global_ready')).start() + + return True + + # PONG alınamadı, birkaç saniye bekleyip tekrar dene + time.sleep(2) + + except Exception as e: + pub.sendMessage('log:warn', msg=f'[SerialManager] Connection attempt {attempt+1} failed: {e}') + time.sleep(1) + + pub.sendMessage('log:error', msg=f'[SerialManager] Failed to connect after 3 attempts') + return False + + except Exception as e: + pub.sendMessage('log:error', msg=f'[SerialManager] Connection error: {e}') + return False + + def get_active_modules(self): + """Get a list of active modules using the serial connection""" + if DISABLE_SERIAL_SHARE_MANAGER: + return [] + with self._lock: + return list(self._active_modules.keys()) + + def set_verbose_logging(self, verbose): + """Enable or disable verbose logging""" + if DISABLE_SERIAL_SHARE_MANAGER: + return + self._verbose_logging = verbose + +# Create a global instance +serialManager = SerialShareManager() + +def register_serial(serial_connection, provider_name='neopixel'): + """Register a serial connection to be shared""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + try: + global serialManager + serialManager.register_serial_provider(serial_connection, provider_name) + return True + except Exception as e: + print(f"Error in register_serial: {e}") + traceback.print_exc() + return False + +def get_serial(module_name): + """Get the shared serial connection for a module""" + if DISABLE_SERIAL_SHARE_MANAGER: + return None + try: + global serialManager + return serialManager.get_serial(module_name) + except Exception as e: + print(f"Error in get_serial: {e}") + traceback.print_exc() + return None + +def 
send_command(command, module_name="unknown"): + """Komut gönderme fonksiyonu""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + global serialManager + return serialManager.send_command(command, module_name) + +def connect_serial(serial_port, baudrate=115200): + """Seri port bağlantısı kur""" + if DISABLE_SERIAL_SHARE_MANAGER: + return False + global serialManager + return serialManager.connect(serial_port, baudrate) + +def set_verbose_logging(verbose): + """Set verbose logging for SerialShareManager""" + if DISABLE_SERIAL_SHARE_MANAGER: + return + global serialManager + serialManager.set_verbose_logging(verbose) diff --git a/modules/actuators/piservo.py b/modules/actuators/piservo.py index 255c01e4..2ae2b47a 100644 --- a/modules/actuators/piservo.py +++ b/modules/actuators/piservo.py @@ -2,44 +2,198 @@ from modules.config import Config from time import sleep from pubsub import pub +import serial +from modules.neopixel.neopx import get_shared_esp32_serial +import threading +import sys +import os + +# Add path to SerialShareManager +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))) +from modules.SerialShareManager import get_serial + +# No longer need a global ESP32 connection - using the shared one class PiServo: def __init__(self, **kwargs): """ PiServo class - :param kwargs: pin, range, start + :param kwargs: pin, range, start, protocol, id, name :param pin: GPIO pin number :param range: [min, max] angle range :param start: initial angle + :param protocol: 'GPIO' (default) or 'ESP32' + :param id: Servo ID for identification + :param name: Servo name for logging and subscription Install: pip install gpiozero - Subscribes to 'piservo:move' to move servo + Subscribes to 'piservo:move' to move any servo + - Argument: angle (int) - angle to move servo + + Subscribes to 'piservo:move:{name}' to move specific servo - Argument: angle (int) - angle to move servo Example: - pub.sendMessage('piservo:move', angle=90) + pub.sendMessage('piservo:move', angle=90) + pub.sendMessage('piservo:move:ear', angle=45) """ - self.pin = kwargs.get('pin') self.range = kwargs.get('range') self.start = kwargs.get('start', 0) + self.id = kwargs.get('id', 0) + self.name = kwargs.get('name', f'servo_{self.id}') self.servo = None + self.serial = None # Will be set if ESP32 protocol is used + self.verbose_logging = kwargs.get('verbose_logging', False) # Add logging control + + # Protokol desteği ekleme + accepted_protocols = ['GPIO', 'ESP32'] + self.protocol = kwargs.get('protocol', 'GPIO') + if self.protocol not in accepted_protocols: + raise ValueError("Invalid protocol specified. 
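`SerialShareManager` exposes module-level helpers so that other modules never touch the port directly. A short usage sketch follows; the module name `'my_module'` and pin number are placeholders, while the topics and command formats come from the module above.

```python
from pubsub import pub
from modules.SerialShareManager import connect_serial, get_serial, send_command

# One provider opens (or registers) the physical ESP32 link...
connect_serial('/dev/ttyUSB0', baudrate=115200)

# ...after which any module may reuse it.
ser = get_serial('my_module')  # returns None until a provider has registered
if ser is not None:
    send_command('SERVO 4 90', module_name='my_module')  # newline is appended for you

# Modules can also wait for the broadcast instead of polling:
def on_ready():
    send_command('ANIMATE RAINBOW_CYCLE GREEN 3', module_name='my_module')

pub.subscribe(on_ready, 'esp32:serial_global_ready')
```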
diff --git a/modules/actuators/piservo.py b/modules/actuators/piservo.py
index 255c01e4..2ae2b47a 100644
--- a/modules/actuators/piservo.py
+++ b/modules/actuators/piservo.py
@@ -2,44 +2,198 @@ from modules.config import Config
 from time import sleep
 from pubsub import pub
+import serial
+from modules.neopixel.neopx import get_shared_esp32_serial
+import threading
+import sys
+import os
+
+# Add path to SerialShareManager
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
+from modules.SerialShareManager import get_serial
+
+# No longer need a global ESP32 connection - using the shared one
 
 class PiServo:
     def __init__(self, **kwargs):
         """
         PiServo class
-        :param kwargs: pin, range, start
+        :param kwargs: pin, range, start, protocol, id, name
         :param pin: GPIO pin number
         :param range: [min, max] angle range
         :param start: initial angle
+        :param protocol: 'GPIO' (default) or 'ESP32'
+        :param id: Servo ID for identification
+        :param name: Servo name for logging and subscription
 
         Install: pip install gpiozero
 
-        Subscribes to 'piservo:move' to move servo
+        Subscribes to 'piservo:move' to move any servo
+        - Argument: angle (int) - angle to move servo
+
+        Subscribes to 'piservo:move:{name}' to move specific servo
         - Argument: angle (int) - angle to move servo
 
         Example:
-        pub.sendMessage('piservo:move', angle=90)
+        pub.sendMessage('piservo:move', angle=90)
+        pub.sendMessage('piservo:move:ear', angle=45)
         """
-        self.pin = kwargs.get('pin')
         self.range = kwargs.get('range')
         self.start = kwargs.get('start', 0)
+        self.id = kwargs.get('id', 0)
+        self.name = kwargs.get('name', f'servo_{self.id}')
         self.servo = None
+        self.serial = None  # Will be set if ESP32 protocol is used
+        self.verbose_logging = kwargs.get('verbose_logging', False)  # Add logging control
+
+        # Add protocol support
+        accepted_protocols = ['GPIO', 'ESP32']
+        self.protocol = kwargs.get('protocol', 'GPIO')
+        if self.protocol not in accepted_protocols:
+            raise ValueError("Invalid protocol specified. Choose one of: " + ", ".join(accepted_protocols))
+
+        # ESP32 protocol handling with improved retry logic
+        if self.protocol == 'ESP32':
+            # Subscribe to both events - the original and the new global event
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_ready')
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_global_ready')
+
+            # Try to get the shared serial connection now - it might already be available
+            self.serial = get_serial(f'servo_{self.name}')
+            if not self.serial:
+                # Try the old way too as fallback
+                self.serial = get_shared_esp32_serial()
+
+                # Start retry timer with increased initial delay
+                retry_timer = threading.Timer(5.0, self.retry_get_serial)
+                retry_timer.daemon = True
+                retry_timer.start()
+
+        # Main movement subscription
         pub.subscribe(self.move, 'piservo:move')
-        # print(self.range)
-        # self.move(0)
-        # sleep(2)
-        # self.move(self.range[0])
-        # sleep(2)
-        # self.move(self.range[1])
-        # sleep(2)
-        self.move(self.start)
+        # Servo-specific subscription
+        specific_topic = f'piservo:move:{self.name}'
+        pub.subscribe(self.move, specific_topic)
+
+        # Initialize to starting position with increased delay
+        timer = threading.Timer(2.0, lambda: self.move(self.start))
+        timer.daemon = True
+        timer.start()
+
+    def retry_get_serial(self):
+        """Retry getting the serial connection with progressive delay"""
+        if self.serial is None:
+            # First try new method
+            self.serial = get_serial(f'servo_{self.name}')
+            if not self.serial:
+                # Fall back to old method
+                self.serial = get_shared_esp32_serial()
+
+        if self.serial:
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[SERVO] {self.name} connected to shared ESP32 serial on retry')
+            self.move(self.start)
+        else:
+            # Progressive backoff - increase delay each time
+            delay = min(10.0, 2.0 + len(self.name) % 3)  # Vary delay slightly per instance
+            retry_timer = threading.Timer(delay, self.retry_get_serial)
+            retry_timer.daemon = True
+            retry_timer.start()
+
+    def on_serial_ready(self):
+        """Called when the shared serial connection becomes available"""
+        # Get the serial connection and log
+        self.serial = get_shared_esp32_serial()
+        if self.serial:
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[SERVO] {self.name} connected to shared ESP32 serial')
+            # Move to start position once serial is available
+            self.move(self.start)
 
     def move(self, angle):
+        """
+        Move servo to specified angle
+        :param angle: Angle to move servo to
+        """
+        # Keep the angle within the allowed range
+        if self.range:
+            if angle < self.range[0]:
+                angle = self.range[0]
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg=f'[SERVO] {self.name}: Angle limited to minimum {self.range[0]}')
+            elif angle > self.range[1]:
+                angle = self.range[1]
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg=f'[SERVO] {self.name}: Angle limited to maximum {self.range[1]}')
+
+        # If using the ESP32 protocol
+        if self.protocol == 'ESP32':
+            self.send_servo_command(angle)
+            return
+
+        # Original code for the GPIO protocol
         if self.servo is None:
             self.servo = AngularServo(self.pin, min_angle=self.range[0], max_angle=self.range[1], initial_angle=self.start)
         self.servo.angle = angle  # Changes the angle (to move the servo)
         sleep(1)  # @TODO: Remove this sleep
-        self.servo.detach()  # Detaches the servo (to stop jitter)
\ No newline at end of file
+        self.servo.detach()  # Detaches the servo (to stop jitter)
+
+    def send_servo_command(self, angle):
+        """
+        Send servo command to ESP32
+        :param angle: Angle to move servo to
+        """
+        if self.protocol != 'ESP32':
+            return
+
+        if self.serial is None:
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[SERVO] {self.name}: Cannot send command - no serial connection')
+            # Try to reconnect to serial
+            self.serial = get_shared_esp32_serial()
+            if self.serial is None:
+                return
+
+        # Constrain angle to valid range
+        if self.range:
+            if angle < self.range[0]:
+                angle = self.range[0]
+            elif angle > self.range[1]:
+                angle = self.range[1]
+
+        # Create and send command
+        cmd = f"SERVO {self.pin} {angle}\n"
+        # Only log actual movement commands when verbose is enabled
+        if self.verbose_logging:
+            pub.sendMessage('log', msg=f'[SERVO] {self.name}: Sending command: {cmd.strip()}')
+
+        try:
+            self.serial.write(cmd.encode())
+            sleep(0.1)  # Brief pause for command processing
+        except Exception as e:
+            pub.sendMessage('log', msg=f'[SERVO] ERROR: Failed to send command to ESP32: {e}')
+            self.serial = None  # Reset the serial connection to trigger a reconnect
+
+    def detach(self):
+        """
+        Detach the servo to prevent jitter
+        """
+        if self.protocol == 'ESP32' and self.serial is not None:
+            cmd = f"SERVO_DETACH {self.pin}\n"
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[SERVO] {self.name}: Detaching servo on pin {self.pin}')
+            try:
+                self.serial.write(cmd.encode())
+            except Exception as e:
+                pub.sendMessage('log', msg=f'[SERVO] ERROR: Failed to send detach command: {e}')
+        elif self.servo:
+            self.servo.detach()
+
+    def __del__(self):
+        """
+        Clean up resources when object is destroyed
+        """
+        self.detach()
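With `protocol: 'ESP32'`, `PiServo` becomes a thin client: a pubsub message is translated into a one-line command on the shared serial link. The topics and the `ear` name below come from the class docstring; the wire format is what `send_servo_command` and `detach` emit, which the ESP32 firmware is expected to parse (the pin number is illustrative).

```python
from pubsub import pub

# Move every servo listening on the generic topic
pub.sendMessage('piservo:move', angle=90)

# Move a single named servo (name comes from its YAML config)
pub.sendMessage('piservo:move:ear', angle=45)

# On the wire this becomes, e.g.:
#   SERVO 4 45\n          (pin, clamped angle)
#   SERVO_DETACH 4\n      (sent by detach()/__del__)
```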
diff --git a/modules/actuators/servo.py b/modules/actuators/servo.py
index 6acd03c3..e8f7e731 100644
--- a/modules/actuators/servo.py
+++ b/modules/actuators/servo.py
@@ -56,6 +56,8 @@ def __init__(self, **kwargs):
         pub.subscribe(self.move, 'servo:' + self.identifier + ':mvabs')
         pub.subscribe(self.move_relative, 'servo:' + self.identifier + ':mv')
+        # Listen for general animation commands
+        pub.subscribe(self.handle_servo_animation, 'servo:move')
 
     def __del__(self):
         pass  # self.reset()
@@ -186,3 +188,25 @@ def translate(self, value):
 
         # Convert the 0-1 range into a value in the right range.
         return self.range[0] + (valueScaled * rightSpan)
+
+    def handle_servo_animation(self, data):
+        """
+        Handles general animation commands coming from DeskGUI or CommandReceiver.
+        :param data: {'animation': animation_name, 'repeat': repeat_count}
+        """
+        animation = data.get('animation', '').upper()
+        repeat = data.get('repeat', 1)
+        # Only move if this servo plays a role in the given animation
+        # Example: the pan servo moves in the HEAD_LEFT animation
+        if animation == 'HEAD_LEFT' and self.identifier == 'pan':
+            for _ in range(repeat):
+                self.move(0)  # Look left (example value)
+                sleep(0.5)
+                self.move(50)  # Return to centre
+        elif animation == 'HEAD_RIGHT' and self.identifier == 'pan':
+            for _ in range(repeat):
+                self.move(100)  # Look right (example value)
+                sleep(0.5)
+                self.move(50)  # Return to centre
+        # Other animations can be added in the same way
+        # If the servo and animation do not match, nothing happens
diff --git a/modules/animate.py b/modules/animate.py
index 954183bd..153ecdf6 100644
--- a/modules/animate.py
+++ b/modules/animate.py
@@ -37,7 +37,17 @@ def animate(self, action):
             cmd = list(step.keys())[0]
             args = list(step.values())
             if 'servo:' in cmd:
-                pub.sendMessage(cmd, percentage=args[0])
+                # Skip and log if the value is invalid or missing
+                try:
+                    val = args[0]
+                    if val is None or (isinstance(val, str) and not val.strip()):
+                        pub.sendMessage('log:error', msg=f"[Animate] Invalid servo value in animation '{action}': {cmd} -> {val}")
+                        continue
+                    val = float(val)
+                except Exception as e:
+                    pub.sendMessage('log:error', msg=f"[Animate] Exception parsing servo value in animation '{action}': {cmd} -> {args[0]} | {e}")
+                    continue
+                pub.sendMessage(cmd, percentage=val)
             elif 'sleep' == cmd:
                 sleep(args[0])
             elif 'animate' == cmd:
diff --git a/modules/audio/bluetooth_audio.py b/modules/audio/bluetooth_audio.py
new file mode 100644
index 00000000..bdef814f
--- /dev/null
+++ b/modules/audio/bluetooth_audio.py
@@ -0,0 +1,859 @@
+import socket
+import threading
+import pickle
+import struct
+import speech_recognition as sr
+import pyttsx3
+from pubsub import pub
+import os
+import time
+import json
+import queue
+
+# Optional FastAPI support for improved performance
+try:
+    import uvicorn
+    from fastapi import FastAPI, WebSocket
+    from fastapi.responses import JSONResponse
+    import asyncio
+    from fastapi.middleware.cors import CORSMiddleware
+    FASTAPI_AVAILABLE = True
+except ImportError:
+    FASTAPI_AVAILABLE = False
+
+# Zeroconf for service discovery
+try:
+    from zeroconf import ServiceInfo, Zeroconf
+    import socket
+    import ipaddress
+    ZEROCONF_AVAILABLE = True
+except ImportError:
+    ZEROCONF_AVAILABLE = False
+
+class BluetoothAudioServer:
+    """
+    Bluetooth Audio Server for handling TTS and Speech Recognition via network
+
+    Run this on the laptop to serve as a Bluetooth audio bridge for the robot
+    """
+    def __init__(self, **kwargs):
+        """
+        Initialize Bluetooth Audio Server
+        :param kwargs: host, tts_port, speech_port, use_fastapi
+        """
+        self.host = kwargs.get('host', '0.0.0.0')
+        self.tts_port = kwargs.get('tts_port', 8095)
+        self.speech_port = kwargs.get('speech_port', 8096)
+        self.use_fastapi = kwargs.get('use_fastapi', FASTAPI_AVAILABLE)
+        self.fastapi_port = kwargs.get('fastapi_port', 8098)
+        self.auto_start_speech = kwargs.get('auto_start_speech', False)
+        self.register_service = kwargs.get('register_service', True)
+        self.service_name = kwargs.get('service_name', "SentryBOT_Audio")
+
+        # TTS engine
+        self.engine = pyttsx3.init()
+        voices = self.engine.getProperty('voices')
+        voice_idx = kwargs.get('voice_idx', 0)
+        if voice_idx < len(voices):
+            self.engine.setProperty('voice', voices[voice_idx].id)
+
+        # Add a TTS request queue with a worker thread
self.tts_queue = queue.Queue() + self.tts_thread = threading.Thread(target=self._tts_worker, daemon=True) + self.tts_thread_running = True + self.tts_thread.start() + + # Speech recognition + self.recognizer = sr.Recognizer() + self.recognizer.pause_threshold = 1 + self.listening = False + self.language = kwargs.get('language', 'en-US') # Default language + + # Find real IP for service registration + self.real_ip = self._get_local_ip() + + # Microphone setup + self.device_name = kwargs.get('device_name', None) + self.device_index = self._get_device_index(self.device_name) + self.sample_rate = kwargs.get('sample_rate', 16000) + + # Clients + self.tts_clients = [] + self.speech_clients = [] + self.websocket_clients = set() + + # Zeroconf service registration + self.zeroconf = None + self.service_info = None + + # Setup server sockets + self.tts_server = None + self.speech_server = None + self.fastapi_app = None + + # Print available microphones + print("Available microphones:") + for i, name in enumerate(sr.Microphone.list_microphone_names()): + print(f"{i}: {name}") + + # Print available TTS voices + print("\nAvailable TTS voices:") + for i, voice in enumerate(voices): + print(f"{i}: {voice.name} ({voice.id})") + + def _get_local_ip(self): + """Get the local IP address""" + try: + # This creates a socket that doesn't actually connect to anything + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + # But we need a target to "connect" to (no packet is sent) + s.connect(("8.8.8.8", 80)) + ip = s.getsockname()[0] + s.close() + return ip + except Exception as e: + print(f"Error getting local IP: {e}") + return '127.0.0.1' # fallback + + def _get_device_index(self, device_name): + """Get microphone device index by name""" + if not device_name: + return None + + for index, name in enumerate(sr.Microphone.list_microphone_names()): + if name == device_name: + print(f"Using microphone: {name} (index {index})") + return index + + print(f"Warning: Microphone '{device_name}' not found, using default") + return None + + def start(self): + """Start the Bluetooth Audio Server""" + print("Starting Bluetooth Audio Server") + + # Run diagnostics + self._check_audio_environment() + + # Start FastAPI if enabled + if self.use_fastapi and FASTAPI_AVAILABLE: + self._start_fastapi_server() + + # Start TTS server + self.tts_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.tts_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.tts_server.bind((self.host, self.tts_port)) + self.tts_server.listen(5) + print(f"TTS Server listening on {self.real_ip}:{self.tts_port}") + + tts_thread = threading.Thread(target=self._accept_tts_clients, daemon=True) + tts_thread.start() + + # Start Speech server + self.speech_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.speech_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.speech_server.bind((self.host, self.speech_port)) + self.speech_server.listen(5) + print(f"Speech Server listening on {self.real_ip}:{self.speech_port}") + + speech_thread = threading.Thread(target=self._accept_speech_clients, daemon=True) + speech_thread.start() + + # Start heartbeat monitor to keep connections alive + heartbeat_thread = threading.Thread(target=self._connection_heartbeat, daemon=True) + heartbeat_thread.start() + + # Register services with zeroconf if enabled + if self.register_service and ZEROCONF_AVAILABLE: + self._register_zeroconf_services() + + # Auto-start speech recognition if enabled + if self.auto_start_speech: + 
print("Auto-starting speech recognition...") + self.listening = True + threading.Thread(target=self._auto_speech_recognition, daemon=True).start() + + def _connection_heartbeat(self): + """Send periodic heartbeats to clients to keep connections alive""" + while True: + try: + # Check TTS clients + for client in list(self.tts_clients): + try: + # Simple non-blocking check if client is still connected + client.setblocking(0) + try: + # 0-byte read - just to check if connection is alive + data = client.recv(1, socket.MSG_PEEK) + if not data: # Connection closed + print(f"Removing dead TTS client") + self.tts_clients.remove(client) + continue + except BlockingIOError: + # This is expected - no data available but socket is alive + pass + except Exception as e: + print(f"TTS client heartbeat error: {e}") + if client in self.tts_clients: + self.tts_clients.remove(client) + + # Reset to blocking mode + client.setblocking(1) + except Exception as e: + print(f"Error checking TTS client: {e}") + if client in self.tts_clients: + self.tts_clients.remove(client) + + # Check Speech clients + for client in list(self.speech_clients): + try: + client.setblocking(0) + try: + data = client.recv(1, socket.MSG_PEEK) + if not data: # Connection closed + print(f"Removing dead Speech client") + self.speech_clients.remove(client) + continue + except BlockingIOError: + # This is expected + pass + except Exception as e: + print(f"Speech client socket error: {e}") + if client in self.speech_clients: + self.speech_clients.remove(client) + + # Reset to blocking mode + client.setblocking(1) + except Exception as e: + print(f"Error checking Speech client: {e}") + if client in self.speech_clients: + self.speech_clients.remove(client) + except Exception as e: + print(f"Heartbeat error: {e}") + + # Sleep for a bit before next heartbeat check + time.sleep(10) + + def _register_zeroconf_services(self): + """Register services with zeroconf for auto-discovery""" + if not ZEROCONF_AVAILABLE: + print("Zeroconf not available. Service registration disabled.") + return + + try: + self.zeroconf = Zeroconf() + + # Convert IP to bytes array + ip_bytes = socket.inet_aton(self.real_ip) + + # Register TTS service + tts_info = ServiceInfo( + "_sentryaudio._tcp.local.", + f"{self.service_name}_TTS._sentryaudio._tcp.local.", + addresses=[ip_bytes], + port=self.tts_port, + properties={ + "server_type": "tts", + "version": "1.0" + }, + server=f"{self.service_name}.local." + ) + + # Register Speech service + speech_info = ServiceInfo( + "_sentryaudio._tcp.local.", + f"{self.service_name}_SPEECH._sentryaudio._tcp.local.", + addresses=[ip_bytes], + port=self.speech_port, + properties={ + "server_type": "speech", + "language": self.language, + "version": "1.0" + }, + server=f"{self.service_name}.local." + ) + + # Register FastAPI service if enabled + if self.use_fastapi and FASTAPI_AVAILABLE: + api_info = ServiceInfo( + "_sentryaudio._tcp.local.", + f"{self.service_name}_API._sentryaudio._tcp.local.", + addresses=[ip_bytes], + port=self.fastapi_port, + properties={ + "server_type": "api", + "version": "1.0" + }, + server=f"{self.service_name}.local." 
+ ) + self.zeroconf.register_service(api_info) + + # Register the services + self.zeroconf.register_service(tts_info) + self.zeroconf.register_service(speech_info) + + print("Services registered with zeroconf for auto-discovery") + + except Exception as e: + print(f"Failed to register services with zeroconf: {e}") + if self.zeroconf: + self.zeroconf.close() + self.zeroconf = None + + def _auto_speech_recognition(self): + """Run speech recognition automatically without waiting for client requests""" + print(f"Starting automatic speech recognition with device_index={self.device_index}") + try: + with sr.Microphone(device_index=self.device_index, sample_rate=self.sample_rate) as source: + print("Adjusting for ambient noise (auto-start)...") + self.recognizer.adjust_for_ambient_noise(source) + print(f"Auto-start listening for speech in language: {self.language}...") + + while self.listening: + try: + print("Waiting for speech...") + audio = self.recognizer.listen(source, timeout=5, phrase_time_limit=10) + print("Processing speech...") + text = self.recognizer.recognize_google(audio, language=self.language) + print(f"Auto-recognized: '{text}'") + + # Send the recognized text to all connected clients + self._broadcast_speech_result(text) + + except sr.WaitTimeoutError: + print("Speech timeout, continuing to listen...") + except sr.UnknownValueError: + print("Could not understand audio, continuing to listen...") + except Exception as e: + print(f"Auto speech recognition error: {e}") + except Exception as e: + print(f"Auto microphone error: {e}") + self.listening = False # Stop on error + + print("Speech recognition stopped") + + def _broadcast_speech_result(self, text): + """Broadcast speech recognition results to all clients""" + # Prepare data for socket clients + print(f"Broadcasting speech result to {len(self.speech_clients)} clients: '{text}'") + speech_data = {'text': text} + data = pickle.dumps(speech_data) + + # Send to all connected speech clients + clients_to_remove = [] + for client in self.speech_clients: + try: + print(f"Sending speech result to client...") + # Send message size first (4 bytes) + client.sendall(len(data).to_bytes(4, byteorder='big')) + # Then send the actual data + client.sendall(data) + print(f"Speech result sent successfully") + except Exception as e: + print(f"Error sending speech result to client: {e}") + clients_to_remove.append(client) + + # Remove disconnected clients + for client in clients_to_remove: + if client in self.speech_clients: + self.speech_clients.remove(client) + print(f"Removed disconnected client") + + # Send via WebSocket if available + if self.use_fastapi and FASTAPI_AVAILABLE: + for websocket in list(self.websocket_clients): + try: + asyncio.run_coroutine_threadsafe( + websocket.send_json({"type": "result", "text": text}), + asyncio.get_event_loop() + ) + except Exception as e: + print(f"Error sending to WebSocket: {e}") + + def _start_fastapi_server(self): + """Start FastAPI server for WebSocket communication""" + self.fastapi_app = FastAPI() + + # Add CORS middleware + self.fastapi_app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + # Health check endpoint + @self.fastapi_app.get("/health") + async def health_check(): + return { + "status": "ok", + "services": { + "tts": True, + "speech": True, + "listening": self.listening + } + } + + # Status endpoint with more details + @self.fastapi_app.get("/status") + async def server_status(): + return { + 
"status": "running", + "listening": self.listening, + "language": self.language, + "tts_clients": len(self.tts_clients), + "speech_clients": len(self.speech_clients), + "websocket_clients": len(self.websocket_clients), + "device_name": self.device_name, + "device_index": self.device_index + } + + # Add control endpoint to start/stop speech recognition + @self.fastapi_app.post("/control/speech") + async def control_speech(action: dict): + if action.get("command") == "start": + if not self.listening: + self.listening = True + threading.Thread(target=self._auto_speech_recognition, daemon=True).start() + return {"status": "speech recognition started"} + elif action.get("command") == "stop": + self.listening = False + return {"status": "speech recognition stopped"} + return {"status": "invalid command"} + + # Add control endpoint to set language + @self.fastapi_app.post("/control/language") + async def set_language(language_data: dict): + if "language" in language_data: + self.language = language_data["language"] + return {"status": "language updated", "language": self.language} + return {"status": "error", "message": "language parameter missing"} + + # WebSocket endpoint for TTS + @self.fastapi_app.websocket("/ws/tts") + async def tts_websocket(websocket: WebSocket): + await websocket.accept() + print("TTS WebSocket client connected") + self.websocket_clients.add(websocket) + + try: + while True: + data = await websocket.receive_text() + message = json.loads(data) + + if "text" in message: + print(f"TTS WebSocket request: {message['text']}") + # Run in a thread to not block the WebSocket + threading.Thread( + target=self._process_tts, + args=(message['text'],), + daemon=True + ).start() + await websocket.send_json({"status": "speaking"}) + except Exception as e: + print(f"TTS WebSocket error: {e}") + finally: + if websocket in self.websocket_clients: + self.websocket_clients.remove(websocket) + + # WebSocket endpoint for speech recognition + @self.fastapi_app.websocket("/ws/speech") + async def speech_websocket(websocket: WebSocket): + await websocket.accept() + print("Speech WebSocket client connected") + self.websocket_clients.add(websocket) + + try: + while True: + data = await websocket.receive_text() + command = json.loads(data) + + if command.get("action") == "start": + print("Starting speech recognition (WebSocket)") + # Use a thread for recognition to avoid blocking + self.listening = True + threading.Thread( + target=self._websocket_speech_recognition, + args=(websocket,), + daemon=True + ).start() + await websocket.send_json({"status": "listening"}) + elif command.get("action") == "stop": + print("Stopping speech recognition (WebSocket)") + self.listening = False + await websocket.send_json({"status": "stopped"}) + except Exception as e: + print(f"Speech WebSocket error: {e}") + finally: + if websocket in self.websocket_clients: + self.websocket_clients.remove(websocket) + + # Start FastAPI in a separate thread + threading.Thread( + target=lambda: uvicorn.run( + self.fastapi_app, + host=self.host, + port=self.fastapi_port, + log_level="info" + ), + daemon=True + ).start() + + print(f"FastAPI server started on {self.host}:{self.fastapi_port}") + + def _accept_tts_clients(self): + """Accept TTS client connections""" + while True: + try: + client, addr = self.tts_server.accept() + print(f"TTS client connected from {addr}") + self.tts_clients.append(client) + threading.Thread(target=self._handle_tts_client, args=(client, addr), daemon=True).start() + except Exception as e: + print(f"TTS 
server error: {e}") + break + + def _handle_tts_client(self, client, addr): + """Handle TTS client messages""" + try: + print(f"TTS client handler started for {addr}") + while True: + # Receive data from client + data = b"" + while True: + try: + chunk = client.recv(4096) + if not chunk: + raise ConnectionError("Connection closed") + data += chunk + + # Try to unpickle the data to see if it's complete + try: + pickle.loads(data) + break + except: + # Data not complete yet, continue receiving + pass + except Exception as e: + print(f"Error receiving TTS data: {e}") + raise + + # Process the TTS request + try: + message = pickle.loads(data) + print(f"Received TTS message: {message}") + if message.get('type') == 'tts' and 'text' in message: + print(f"TTS request: {message['text']}") + self._process_tts(message['text']) + else: + print(f"Invalid TTS message format: {message}") + except Exception as e: + print(f"Error processing TTS message: {e}") + except Exception as e: + print(f"TTS client error ({addr}): {e}") + finally: + if client in self.tts_clients: + self.tts_clients.remove(client) + try: + client.close() + print(f"Closed TTS connection to {addr}") + except: + pass + + def _tts_worker(self): + """Process TTS requests from queue to avoid concurrent engine.runAndWait() calls""" + while self.tts_thread_running: + try: + # Get text from queue with timeout + try: + text = self.tts_queue.get(timeout=1.0) + except queue.Empty: + continue + + print(f"Processing queued TTS request: '{text}'") + + try: + # Attempt to get the default output device name for diagnostic info + try: + import sounddevice as sd + device_info = sd.query_devices(kind='output') + print(f"Using audio output device: {device_info['name']}") + except: + print("Could not determine output device") + + # Speak the text - with proper error handling for "run loop already started" + self.engine.say(text) + try: + self.engine.runAndWait() + print(f"TTS completed for: '{text}'") + except RuntimeError as e: + if "run loop already started" in str(e): + print("TTS run loop already started, creating new engine instance") + # Create a new engine instance to avoid the "run loop already started" error + self.engine = pyttsx3.init() + voices = self.engine.getProperty('voices') + voice_idx = 0 # Default to first voice + if len(voices) > voice_idx: + self.engine.setProperty('voice', voices[voice_idx].id) + # Try speaking again with new engine instance + self.engine.say(text) + self.engine.runAndWait() + print(f"TTS retry completed for: '{text}'") + else: + raise # Re-raise if it's a different RuntimeError + except Exception as e: + print(f"TTS processing error: {e}") + # Try to reinitialize the engine if there was an error + try: + print("Reinitializing TTS engine...") + # Force stop any running engine + try: + self.engine.endLoop() + except: + pass + + self.engine = pyttsx3.init() + voices = self.engine.getProperty('voices') + # Get current voice setting + for i, voice in enumerate(voices): + if voice.id == self.engine.getProperty('voice'): + self.engine.setProperty('voice', voice.id) + break + except Exception as reinit_e: + print(f"Failed to reinitialize TTS engine: {reinit_e}") + + # Mark task as done + self.tts_queue.task_done() + except Exception as e: + print(f"TTS worker thread error: {e}") + time.sleep(1) # Avoid tight loop on error + + def _process_tts(self, text): + """Add TTS request to queue instead of processing directly""" + try: + print(f"Queueing TTS request: '{text}'") + self.tts_queue.put(text) + except Exception as e: + 
print(f"Error queueing TTS request: {e}") + + def _check_audio_environment(self): + """Check the audio environment for diagnostics""" + print("\n=== Audio Environment Diagnostic ===") + + # Check if audio output devices are available + try: + import sounddevice as sd + devices = sd.query_devices() + print(f"Found {len(devices)} audio devices:") + + # Print input devices + input_devices = [d for d in devices if d['max_input_channels'] > 0] + print(f"Input devices ({len(input_devices)}):") + for i, dev in enumerate(input_devices): + print(f" {i}: {dev['name']} (channels: {dev['max_input_channels']})") + + # Print output devices + output_devices = [d for d in devices if d['max_output_channels'] > 0] + print(f"Output devices ({len(output_devices)}):") + for i, dev in enumerate(output_devices): + print(f" {i}: {dev['name']} (channels: {dev['max_output_channels']})") + + # Print default devices + default_input = sd.query_devices(kind='input') + default_output = sd.query_devices(kind='output') + print(f"Default input device: {default_input['name']}") + print(f"Default output device: {default_output['name']}") + except Exception as e: + print(f"Could not check audio devices: {e}") + + print("=== End of Audio Environment Diagnostic ===\n") + + def _accept_speech_clients(self): + """Accept Speech client connections""" + while True: + try: + client, addr = self.speech_server.accept() + print(f"Speech client connected from {addr}") + self.speech_clients.append(client) + threading.Thread(target=self._handle_speech_client, args=(client, addr), daemon=True).start() + except Exception as e: + print(f"Speech server error: {e}") + break + + def _handle_speech_client(self, client, addr): + """Handle speech client messages""" + try: + while True: + # Receive data from client + data = b"" + while True: + chunk = client.recv(4096) + if not chunk: + raise ConnectionError("Connection closed") + data += chunk + try: + # Try to unpickle the data to see if it's complete + pickle.loads(data) + break + except: + # Data not complete yet, continue receiving + pass + + # Process the speech command + command = pickle.loads(data) + if command.get('command') == 'start_listening': + print("Starting speech recognition") + if not self.listening: + self.listening = True + threading.Thread(target=self._speech_recognition, args=(client,), daemon=True).start() + elif command.get('command') == 'stop_listening': + print("Stopping speech recognition") + self.listening = False + elif command.get('command') == 'set_language': + self.language = command.get('language', 'en-US') + print(f"Speech recognition language set to: {self.language}") + except Exception as e: + print(f"Speech client error ({addr}): {e}") + finally: + if client in self.speech_clients: + self.speech_clients.remove(client) + try: + client.close() + except: + pass + + def _speech_recognition(self, client): + """Run speech recognition and send results to client""" + try: + with sr.Microphone(device_index=self.device_index, sample_rate=self.sample_rate) as source: + print("Adjusting for ambient noise...") + self.recognizer.adjust_for_ambient_noise(source) + print(f"Listening for speech in language: {self.language}...") + + while self.listening and client in self.speech_clients: + try: + audio = self.recognizer.listen(source, timeout=5, phrase_time_limit=10) + print("Processing speech...") + text = self.recognizer.recognize_google(audio, language=self.language) + print(f"Recognized: {text}") + + # Send the recognized text to the client + speech_data = {'text': text} + 
data = pickle.dumps(speech_data)
+                        client.sendall(len(data).to_bytes(4, byteorder='big'))
+                        client.sendall(data)
+
+                        # Also broadcast to other clients
+                        self._broadcast_speech_result(text)
+
+                    except sr.WaitTimeoutError:
+                        pass
+                    except sr.UnknownValueError:
+                        pass
+                    except Exception as e:
+                        print(f"Speech recognition error: {e}")
+                        if client not in self.speech_clients:
+                            break
+        except Exception as e:
+            print(f"Microphone error: {e}")
+
+    def _websocket_speech_recognition(self, websocket):
+        """Run speech recognition and send results via WebSocket"""
+        self.listening = True
+
+        try:
+            with sr.Microphone(device_index=self.device_index, sample_rate=self.sample_rate) as source:
+                print("Adjusting for ambient noise (WebSocket)...")
+                self.recognizer.adjust_for_ambient_noise(source)
+                print(f"Listening for speech (WebSocket) in language: {self.language}...")
+
+                while self.listening and websocket in self.websocket_clients:
+                    try:
+                        audio = self.recognizer.listen(source, timeout=5, phrase_time_limit=10)
+                        print("Processing speech (WebSocket)...")
+                        text = self.recognizer.recognize_google(audio, language=self.language)
+                        print(f"Recognized (WebSocket): {text}")
+
+                        # Send the recognized text via WebSocket. Note: this worker thread has
+                        # no running event loop of its own; on Python 3.10+ get_event_loop()
+                        # fails outside the main thread, so the server's loop should be
+                        # captured at startup and passed in here.
+                        asyncio.run_coroutine_threadsafe(
+                            websocket.send_json({"type": "result", "text": text}),
+                            asyncio.get_event_loop()
+                        )
+
+                        # Also broadcast to other clients
+                        self._broadcast_speech_result(text)
+
+                    except sr.WaitTimeoutError:
+                        pass
+                    except sr.UnknownValueError:
+                        pass
+                    except Exception as e:
+                        print(f"WebSocket speech recognition error: {e}")
+                        if websocket not in self.websocket_clients:
+                            break
+        except Exception as e:
+            print(f"WebSocket microphone error: {e}")
+
+    def stop(self):
+        """Stop the Bluetooth Audio Server"""
+        self.listening = False
+        self.tts_thread_running = False  # Signal TTS worker to stop
+
+        # Close TTS server
+        if self.tts_server:
+            try:
+                # Make sure the socket is still valid before closing
+                if hasattr(self.tts_server, '_closed') and not self.tts_server._closed:
+                    self.tts_server.close()
+                elif isinstance(self.tts_server, socket.socket):
+                    self.tts_server.close()
+            except Exception as e:
+                print(f"TTS server close error: {e}")
+            self.tts_server = None
+
+        # Close Speech server
+        if self.speech_server:
+            try:
+                # Make sure the socket is still valid before closing
+                if hasattr(self.speech_server, '_closed') and not self.speech_server._closed:
+                    self.speech_server.close()
+                elif isinstance(self.speech_server, socket.socket):
+                    self.speech_server.close()
+            except Exception as e:
+                print(f"Speech server close error: {e}")
+            self.speech_server = None
+
+        # Close all client connections
+        for client in list(self.tts_clients + self.speech_clients):
+            try:
+                client.close()
+            except Exception as e:
+                print(f"Client close error: {e}")
+
+        # Clear client lists to prevent further attempts to use closed sockets
+        self.tts_clients.clear()
+        self.speech_clients.clear()
+
+        # Unregister zeroconf services
+        if self.zeroconf:
+            try:
+                self.zeroconf.close()
+            except Exception as e:
+                print(f"Zeroconf close error: {e}")
+            self.zeroconf = None
+
+        print("Bluetooth Audio Server stopped")
+
+# Run the server if executed directly
+if __name__ == "__main__":
+    server = BluetoothAudioServer(use_fastapi=True, auto_start_speech=True)
+    server.start()
+
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        print("\nShutting down...")
+    finally:
+        server.stop()
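For reference while reading the handler above: a TTS request is nothing more than one pickled dict written to the connection, since `_handle_tts_client` unpickles the accumulated byte stream. A minimal client sketch (the host address and the `send_tts` helper name are illustrative; 8095 is the default `bluetooth_port` used by the TTS module later in this patch):

```
import pickle
import socket

def send_tts(text, host="192.168.1.100", port=8095):
    """Send one TTS request in the shape _handle_tts_client expects."""
    # The server unpickles the raw stream, so a single sendall() of one
    # pickled dict is a complete request.
    with socket.create_connection((host, port), timeout=5.0) as sock:
        sock.sendall(pickle.dumps({'type': 'tts', 'text': text}))

if __name__ == '__main__':
    send_tts('Hello from a test client')
```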
diff --git a/modules/audio/buzzer.py b/modules/audio/buzzer.py
index 71fc2789..9389903e 100644
--- a/modules/audio/buzzer.py
+++ b/modules/audio/buzzer.py
@@ -1,8 +1,9 @@
 from gpiozero import TonalBuzzer
 from gpiozero.tones import Tone
 import time
-
 from pubsub import pub
+from modules.config import Config
+from modules.esp32serial import ESP32SerialManager
 
 from modules.audio.melodies.deck_the_halls import MelodyDeckTheHalls
 from modules.audio.melodies.happy_birthday import MelodyHappyBirthday
@@ -12,7 +13,9 @@ class Buzzer:
     def __init__(self, **kwargs):
         """
         Buzzer class
-        :param kwargs: pin
+        :param kwargs: pin, protocol, name, serial_port, baudrate
+        :param pin: GPIO pin number
+        :param protocol: 'GPIO' (default) or 'ESP32'
 
         Install: pip install gpiozero
 
@@ -21,55 +24,95 @@ def __init__(self, **kwargs):
         Example:
         pub.sendMessage('play', song="happy birthday") # Also available: 'merry christmas'
         pub.sendMessage('buzz', frequency=440, length=0.5)
-
         """
         self.pin = kwargs.get('pin')
-        self.buzzer = TonalBuzzer(self.pin)
+        self.name = kwargs.get('name', 'buzzer')
+        self.buzzer = None
+
+        # Add protocol support
+        accepted_protocols = ['GPIO', 'ESP32']
+        self.protocol = kwargs.get('protocol', 'GPIO')
+        if self.protocol not in accepted_protocols:
+            raise ValueError("Invalid protocol specified. Choose one of: " + ", ".join(accepted_protocols))
+
+        # Serial connection settings for the ESP32 protocol
+        if self.protocol == 'ESP32':
+            # Read the ESP32 serial settings from Config
+            config = Config()
+            if 'serial_config' in config.config.get('buzzer', {}):
+                serial_config = config.config['buzzer']['serial_config']
+                serial_port = serial_config.get('serial_port', '/dev/ttyUSB0')
+                baudrate = serial_config.get('baudrate', 115200)
+            else:
+                # Fall back to defaults or kwargs
+                serial_port = kwargs.get('serial_port', '/dev/ttyUSB0')
+                baudrate = kwargs.get('baudrate', 115200)
+
+            # Use the central serial communication manager
+            self.serial_manager = ESP32SerialManager(serial_port, baudrate)
+            pub.sendMessage('log', msg=f'[BUZZER] {self.name} initialized on pin {self.pin} using ESP32 protocol')
+        else:
+            # Create a TonalBuzzer for the GPIO protocol
+            try:
+                self.buzzer = TonalBuzzer(self.pin)
+                pub.sendMessage('log', msg=f'[BUZZER] {self.name} initialized on pin {self.pin} using GPIO protocol')
+            except Exception as e:
+                pub.sendMessage('log', msg=f'[BUZZER] ERROR: Failed to initialize TonalBuzzer: {e}')
+                raise
 
+        # Subscribe to the original messages
         pub.subscribe(self.play_song, 'play')
         pub.subscribe(self.buzz, 'buzz')
 
+        # Subscribe to additional messages for the ESP32 protocol
+        pub.subscribe(self.beep, 'buzzer:beep')
+        specific_topic = f'buzzer:beep:{self.name}'
+        pub.subscribe(self.beep, specific_topic)
+
+    def beep(self, duration=0.5):
+        """
+        Make buzzer beep for given duration (ESP32 protocol)
+        :param duration: Duration in seconds
+        """
+        if self.protocol == 'ESP32':
+            self.send_buzzer_command(duration)
+        else:
+            # Simple beep for the GPIO protocol
+            self.buzz(440, duration)  # 440 Hz (A4) standard beep
+
+    def send_buzzer_command(self, duration, frequency=440):
+        """
+        Send buzzer command to ESP32
+        :param duration: Duration in seconds
+        :param frequency: Frequency in Hz
+        """
+        if self.protocol != 'ESP32':
+            return
+
+        # Build and send the command
+        cmd = f"BUZZER {self.pin} {frequency} {duration}"
+
+        try:
+            self.serial_manager.send_command(cmd)
+        except Exception as e:
+            pub.sendMessage('log', msg=f'[BUZZER] ERROR: Failed to send command to ESP32: {e}')
+
     def buzz(self, frequency, length):
         """
         Buzz the buzzer
         :param frequency: Frequency of the buzz
         :param length: Length of the buzz
         """
-        if (frequency == 0):
+        if self.protocol == 'ESP32':
+            self.send_buzzer_command(length, frequency)
+            return
+
+        # Original GPIO protocol code
+        if frequency == 0:
             time.sleep(length)
             return
         self.buzzer.play(Tone(frequency))
         time.sleep(length)
         self.buzzer.stop()
 
     def play_song(self, song):
         """
         Play a song
         :param song: Song to play
         """
         if 'merry christmas' in song:
             self.play(MelodyDeckTheHalls.MELODY, MelodyDeckTheHalls.TEMPO, MelodyDeckTheHalls.PAUSE, MelodyDeckTheHalls.PACE)
         if 'happy birthday' in song:
             self.play(MelodyHappyBirthday.MELODY, MelodyHappyBirthday.TEMPO, MelodyHappyBirthday.PAUSE,
                       MelodyHappyBirthday.PACE)
         self.buzzer.stop()
 
     def play(self, melody, tempo, pause, pace=0.800):
         """
         Play a melody
         :param melody: Melody to play
         :param tempo: Tempo of the melody
         :param pause: Pause between notes
         :param pace: Pace of the melody
 
         See `melodies` for examples
         """
         for i in range(0, len(melody)):  # Play song
 
             noteDuration = pace / tempo[i]
             if type(melody[i]) is not int:
                 melody[i] = MelodyNotes.notes[melody[i]]
             self.buzz(melody[i], noteDuration)  # Change the frequency along the song note
 
             pauseBetweenNotes = noteDuration * pause
             time.sleep(pauseBetweenNotes)
\ No newline at end of file
diff --git a/modules/audio/speechinput.py b/modules/audio/speechinput.py
index ecc4aefc..51a1d043 100644
--- a/modules/audio/speechinput.py
+++ b/modules/audio/speechinput.py
@@ -2,20 +2,51 @@
 from pubsub import pub
 from time import sleep
 from threading import Thread
+import socket
+import json
+import pickle
+import threading
+import time
+
+# Add zeroconf for service discovery
+try:
+    from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
+    ZEROCONF_AVAILABLE = True
+except ImportError:
+    ZEROCONF_AVAILABLE = False
+    pub.sendMessage('log:error', msg='[Speech] Zeroconf not available. 
Auto-discovery disabled.') class SpeechInput: """ Use speech_recognition to detect and interpret audio """ def __init__(self, **kwargs): - self.recognizer = sr.Recognizer() - self.recognizer.pause_threshold = 1 + self.protocol = kwargs.get('protocol', 'direct') + self.language = kwargs.get('language', 'en-US') # Default to US English + self.auto_discover = kwargs.get('auto_discover', True) + + if self.protocol == 'direct': + self.recognizer = sr.Recognizer() + self.recognizer.pause_threshold = 1 - self.device_name = kwargs.get('device_name', 'lp') - self.device = self.get_device_index(self.device_name) - self.sample_rate = kwargs.get('sample_rate', 16000) + self.device_name = kwargs.get('device_name', 'lp') + self.device = self.get_device_index(self.device_name) + self.sample_rate = kwargs.get('sample_rate', 16000) - self.mic = sr.Microphone(device_index=self.device, sample_rate=self.sample_rate) + self.mic = sr.Microphone(device_index=self.device, sample_rate=self.sample_rate) + else: # bluetooth + self.bluetooth_server = kwargs.get('bluetooth_server', '') + self.bluetooth_port = kwargs.get('bluetooth_port', 8096) + self.reconnect_timeout = kwargs.get('reconnect_timeout', 5) + self.bt_socket = None + self.bt_connected = False + + # If auto-discover is enabled and server is not specified, try to discover + if self.auto_discover and not self.bluetooth_server and ZEROCONF_AVAILABLE: + self.discover_bluetooth_server() + else: + Thread(target=self._connect_bluetooth, daemon=True).start() + self.listening = False pub.subscribe(self.start, 'speech:listen') @@ -25,10 +56,321 @@ def __init__(self, **kwargs): def __del__(self): self.stop() + if self.protocol == 'bluetooth' and self.bt_socket: + try: + self.bt_socket.close() + except: + pass + + def discover_bluetooth_server(self): + """Try to discover the Bluetooth audio server using zeroconf""" + if not ZEROCONF_AVAILABLE: + pub.sendMessage('log:error', msg='[Speech] Cannot auto-discover: zeroconf not available') + return + + found_servers = [] + + def on_service_state_change(zeroconf, service_type, name, state_change): + if state_change is ServiceStateChange.Added: + info = zeroconf.get_service_info(service_type, name) + if info: + addresses = [socket.inet_ntoa(addr) for addr in info.addresses] + if addresses: + server_data = { + 'address': addresses[0], + 'port': info.port, + 'name': info.server + } + found_servers.append(server_data) + pub.sendMessage('log', msg=f'[Speech] Discovered Bluetooth audio server: {server_data}') + + pub.sendMessage('log', msg='[Speech] Looking for Bluetooth audio servers...') + zeroconf = Zeroconf() + browser = ServiceBrowser(zeroconf, "_sentryaudio._tcp.local.", handlers=[on_service_state_change]) + + # Give some time for discovery + sleep(3) + zeroconf.close() + + if found_servers: + # Use the first server found + self.bluetooth_server = found_servers[0]['address'] + pub.sendMessage('log', msg=f'[Speech] Auto-selected Bluetooth server: {self.bluetooth_server}') + Thread(target=self._connect_bluetooth, daemon=True).start() + else: + # Fallback to local network scanning + self._scan_local_network() + + def _scan_local_network(self): + """Scan local network for potential Bluetooth audio servers""" + pub.sendMessage('log', msg='[Speech] Scanning local network for audio servers...') + + # Get local IP to determine network + local_ip = '' + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + local_ip = s.getsockname()[0] + s.close() + except: + local_ip = '127.0.0.1' + + # Extract 
network prefix + ip_parts = local_ip.split('.') + base_ip = f"{ip_parts[0]}.{ip_parts[1]}.{ip_parts[2]}." + + # Scan common IPs first + common_hosts = [ + '1', '2', '100', '101', '200', '254', # Common router/device IPs + '137.52' # The robot's IP subnet part + ] + + for host_suffix in common_hosts: + ip = f"{base_ip}{host_suffix}" + if self._check_audio_server(ip): + return + + # If not found, start a more comprehensive scan in background + Thread(target=self._comprehensive_network_scan, args=(base_ip,), daemon=True).start() + + def _comprehensive_network_scan(self, base_ip): + """More comprehensive network scan in background""" + pub.sendMessage('log', msg='[Speech] Starting comprehensive network scan (this may take a while)...') + + # Scan the first 20 IPs in the network + for i in range(1, 21): + ip = f"{base_ip}{i}" + if self._check_audio_server(ip): + return + + pub.sendMessage('log:error', msg='[Speech] Could not find Bluetooth audio server. Please enter IP manually.') + + def _check_audio_server(self, ip): + """Check if an IP has the audio server running""" + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.5) + result = sock.connect_ex((ip, self.bluetooth_port)) + sock.close() + + if result == 0: + pub.sendMessage('log', msg=f'[Speech] Found audio server at {ip}') + self.bluetooth_server = ip + Thread(target=self._connect_bluetooth, daemon=True).start() + return True + except: + pass + return False + + def _connect_bluetooth(self): + """Connect to Bluetooth audio server for speech input""" + attempts = 0 + max_attempts = 10 # Maximum number of attempts before giving up + + while not self.bt_connected and attempts < max_attempts: + attempts += 1 + try: + if not self.bluetooth_server: + pub.sendMessage('log:error', msg='[Speech] No Bluetooth server specified') + sleep(self.reconnect_timeout) + continue + + pub.sendMessage('log', msg=f'[Speech] Connecting to Bluetooth server at {self.bluetooth_server}:{self.bluetooth_port} (attempt {attempts})') + + # Close any existing socket + if self.bt_socket: + try: + self.bt_socket.close() + except: + pass + + self.bt_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.bt_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.bt_socket.settimeout(10.0) # Add a connection timeout + self.bt_socket.connect((self.bluetooth_server, self.bluetooth_port)) + self.bt_connected = True + pub.sendMessage('log', msg=f'[Speech] Connected to Bluetooth server at {self.bluetooth_server}') + + # Reset socket timeout for normal operations + self.bt_socket.settimeout(None) + + # Send language preference to server + try: + language_data = {'command': 'set_language', 'language': self.language} + pub.sendMessage('log', msg=f'[Speech] Sending language setting: {self.language}') + self.bt_socket.sendall(pickle.dumps(language_data)) + pub.sendMessage('log', msg=f'[Speech] Set language to {self.language}') + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Failed to set language: {e}') + + Thread(target=self._receive_bluetooth_audio, daemon=True).start() + return # Successfully connected + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Failed to connect to Bluetooth server: {e}') + sleep(self.reconnect_timeout) + + if not self.bt_connected: + pub.sendMessage('log:error', msg=f'[Speech] Failed to connect to Bluetooth server after {max_attempts} attempts') + + def _receive_bluetooth_audio(self): + """Receive speech recognition results from Bluetooth server""" + 
pub.sendMessage('log', msg='[Speech] Starting to receive audio from Bluetooth server') + receive_failures = 0 + max_failures = 5 # Maximum consecutive receive failures before reconnecting + connection_heartbeat_time = time.time() # Track last successful interaction + heartbeat_interval = 30 # Send heartbeat every 30 seconds + + while self.bt_connected: + try: + # Check if we need to send a heartbeat to keep the connection alive + current_time = time.time() + if current_time - connection_heartbeat_time > heartbeat_interval: + try: + # Simple heartbeat packet - just a simple peek to check connection + self.bt_socket.settimeout(2.0) + self.bt_socket.sendall(pickle.dumps({'command': 'heartbeat'})) + connection_heartbeat_time = current_time + pub.sendMessage('log', msg='[Speech] Sent heartbeat packet') + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Heartbeat failed: {e}') + # Don't increment failure count for heartbeat failures + + # Receive speech recognition data + pub.sendMessage('log', msg='[Speech] Waiting for data from Bluetooth server') + + # Set a timeout for receiving data + self.bt_socket.settimeout(30.0) # 30 second timeout for receiving data + + try: + data_size_bytes = self.bt_socket.recv(4) + if not data_size_bytes: + pub.sendMessage('log:error', msg='[Speech] Connection closed by server') + raise ConnectionError("Connection closed by server") + + # Update heartbeat time on successful receive + connection_heartbeat_time = time.time() + + data_size = int.from_bytes(data_size_bytes, byteorder='big') + pub.sendMessage('log', msg=f'[Speech] Receiving data of size {data_size} bytes') + + if data_size == 0: + continue + + data = b'' + self.bt_socket.settimeout(10.0) # 10 second timeout for chunk reception + + # Implement better error handling for receiving data chunks + while len(data) < data_size: + chunk_size = min(4096, data_size - len(data)) + try: + packet = self.bt_socket.recv(chunk_size) + if not packet: + pub.sendMessage('log:error', msg='[Speech] Connection lost while receiving data') + raise ConnectionError("Empty packet received") + data += packet + except socket.timeout: + pub.sendMessage('log', msg='[Speech] Timeout while receiving chunk, retrying...') + # Don't break on timeouts, just retry a few times + retry_count = 0 + max_retries = 3 + while retry_count < max_retries: + try: + retry_count += 1 + packet = self.bt_socket.recv(chunk_size) + if packet: + data += packet + break + except socket.timeout: + pub.sendMessage('log', msg=f'[Speech] Retry {retry_count} failed') + + # If we've tried enough and still no data, raise exception + if retry_count >= max_retries: + raise ConnectionError("Maximum retries exceeded") + + # Update heartbeat time after successful data reception + connection_heartbeat_time = time.time() + + if len(data) == data_size: + try: + speech_data = pickle.loads(data) + if 'text' in speech_data: + text = speech_data['text'] + pub.sendMessage('log', msg=f'[Speech BT] I heard: {text}') + if self.listening: + pub.sendMessage('speech', text=text.lower()) + else: + pub.sendMessage('log', msg='[Speech] Received speech but not listening') + + # Reset failure counter on successful receive + receive_failures = 0 + else: + pub.sendMessage('log:error', msg=f'[Speech] Received invalid speech data: {speech_data}') + receive_failures += 1 + except pickle.UnpicklingError as pe: + pub.sendMessage('log:error', msg=f'[Speech] Failed to unpickle data: {pe}') + receive_failures += 1 + else: + pub.sendMessage('log:error', msg=f'[Speech] Incomplete data 
received: got {len(data)} bytes, expected {data_size}') + receive_failures += 1 + except socket.timeout: + pub.sendMessage('log', msg='[Speech] Socket timeout while waiting for data, continuing...') + # Socket timeouts shouldn't immediately disconnect + if time.time() - connection_heartbeat_time > 120: # 2 minutes with no activity + pub.sendMessage('log:error', msg='[Speech] No data received for too long, reconnecting...') + raise ConnectionError("Extended period with no data") + + except ConnectionError as ce: + pub.sendMessage('log:error', msg=f'[Speech] Connection error: {ce}') + receive_failures += 1 + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Bluetooth receive error: {e}') + receive_failures += 1 + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Outer exception loop: {e}') + receive_failures += 1 + + # If too many failures, reconnect + if receive_failures >= max_failures: + pub.sendMessage('log:error', msg=f'[Speech] Too many receive failures ({receive_failures}), reconnecting') + self.bt_connected = False + # Close socket properly before reconnecting + try: + self.bt_socket.close() + except: + pass + self.bt_socket = None + # Reconnect with a slight delay + time.sleep(1) + Thread(target=self._connect_bluetooth, daemon=True).start() + break def start(self): + pub.sendMessage('log', msg='[Speech] Starting recognition') self.listening = True - Thread(target=self.detect, args=()).start() + if self.protocol == 'direct': + Thread(target=self.detect, args=()).start() + elif self.protocol == 'bluetooth': + # Send start listening command to Bluetooth server + try: + if not self.bt_connected: + pub.sendMessage('log', msg='[Speech] Not connected yet, establishing connection') + Thread(target=self._connect_bluetooth, daemon=True).start() + # Wait briefly for connection to establish + sleep(3) + + if self.bt_connected: + pub.sendMessage('log', msg='[Speech] Sending start_listening command') + command = {'command': 'start_listening'} + self.bt_socket.sendall(pickle.dumps(command)) + pub.sendMessage('log', msg='[Speech] Listening command sent') + else: + pub.sendMessage('log:error', msg='[Speech] Cannot start - not connected') + # Try to reconnect + Thread(target=self._connect_bluetooth, daemon=True).start() + except Exception as e: + pub.sendMessage('log:error', msg=f'[Speech] Error starting recognition: {e}') + self.bt_connected = False + Thread(target=self._connect_bluetooth, daemon=True).start() return self def get_device_index(self, device_name): @@ -50,10 +392,8 @@ def detect(self): while self.listening: try: audio = self.recognizer.listen(source)#, timeout=10, phrase_time_limit=5) - # pub.sendMessage('led', identifiers='top5', color='white') - # pub.sendMessage('log', msg='[Speech] End Detection') - val = self.recognizer.recognize_google(audio) + val = self.recognizer.recognize_google(audio, language=self.language) pub.sendMessage('log', msg='[Speech] I heard: ' + str(val)) pub.sendMessage('speech', text=val.lower()) except sr.WaitTimeoutError as e: @@ -61,12 +401,16 @@ def detect(self): except sr.UnknownValueError as e: pass # pub.sendMessage('log:error', msg='[Speech] Detection Error: ' + str(e)) - # finally: - # pub.sendMessage('led', identifiers='top5', color='off') def stop(self): self.listening = False pub.sendMessage('log', msg='[Speech] Stopping') + if self.protocol == 'bluetooth' and self.bt_connected: + try: + command = {'command': 'stop_listening'} + self.bt_socket.sendall(pickle.dumps(command)) + except: + self.bt_connected = False 
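The receive loop above is easier to follow once the framing is spelled out: every speech result arrives as a 4-byte big-endian length header followed by a pickled `{'text': ...}` dict, exactly as the server sends it. A stripped-down sketch of the same framing (the helper names are illustrative; `sock` is assumed to be a connected socket):

```
import pickle

def recv_exact(sock, n):
    """Read exactly n bytes or raise, mirroring the chunked loop above."""
    data = b''
    while len(data) < n:
        chunk = sock.recv(min(4096, n - len(data)))
        if not chunk:
            raise ConnectionError('Connection closed mid-message')
        data += chunk
    return data

def recv_speech_result(sock):
    size = int.from_bytes(recv_exact(sock, 4), byteorder='big')
    return pickle.loads(recv_exact(sock, size))['text']
```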
# allow script to be run directly if __name__ == '__main__': @@ -78,4 +422,4 @@ def stop(self): from logwrapper import LogWrapper log = LogWrapper(path=os.path.dirname(__file__)) speech = SpeechInput() - speech.start() \ No newline at end of file + speech.start() diff --git a/modules/audio/tts.py b/modules/audio/tts.py index 0c6e0a9f..9fb82bfd 100644 --- a/modules/audio/tts.py +++ b/modules/audio/tts.py @@ -1,21 +1,36 @@ from pubsub import pub from time import sleep import pyttsx3 - from elevenlabs import ElevenLabs, VoiceSettings, play import os +import socket +import pickle +import threading +import sys +import traceback + +# Add zeroconf for service discovery +try: + from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf + ZEROCONF_AVAILABLE = True +except ImportError: + ZEROCONF_AVAILABLE = False + pub.sendMessage('log:error', msg='[TTS] Zeroconf not available. Auto-discovery disabled.') class TTS: def __init__(self, **kwargs): """ TTS class - :param kwargs: translator, service, voice_id + :param kwargs: translator, service, voice_id, protocol, bluetooth_server, auto_discover :param translator: translator object - :param service: pyttsx3 or elevenlabs + :param service: pyttsx3, elevenlabs, or bluetooth :param voice_id: elevenlabs voice id + :param protocol: direct or bluetooth + :param bluetooth_server: IP address of the Bluetooth audio server + :param auto_discover: whether to automatically discover Bluetooth devices - Install: pip install pyttsx3 elevenlabs + Install: pip install pyttsx3 elevenlabs zeroconf Requires API key environment variable ELEVENLABS_KEY or use pyttsx3 @@ -27,33 +42,207 @@ def __init__(self, **kwargs): """ self.translator = kwargs.get('translator', None) self.service = kwargs.get('service', 'pyttsx3') - if self.service == 'elevenlabs': - self.init_elevenlabs(kwargs.get('voice_id', '')) + self.protocol = kwargs.get('protocol', 'direct') + self.bluetooth_server = kwargs.get('bluetooth_server', '') + self.bluetooth_port = kwargs.get('bluetooth_port', 8095) + self.auto_discover = kwargs.get('auto_discover', True) + self.reconnect_attempts = kwargs.get('reconnect_attempts', 3) + self.bt_socket = None + self.bt_connected = False + self.debug = kwargs.get('debug', False) + self.speaking = False + + # Publish initialization information + pub.sendMessage('log', msg=f"[TTS] Initializing with protocol: {self.protocol}, service: {self.service}") + if self.protocol == 'bluetooth': + pub.sendMessage('log', msg=f"[TTS] Bluetooth server: {self.bluetooth_server}") + + if self.protocol == 'bluetooth': + # If auto-discover is enabled and server is not specified, try to discover + if self.auto_discover and not self.bluetooth_server and ZEROCONF_AVAILABLE: + self.discover_bluetooth_server() + else: + self.init_bluetooth() else: - self.init_pyttsx3() + if self.service == 'elevenlabs': + self.init_elevenlabs(kwargs.get('voice_id', '')) + else: + self.init_pyttsx3(kwargs.get('voice_index', 0)) + # Set subscribers pub.subscribe(self.speak, 'tts') + # For diagnostic testing + pub.subscribe(self.test_connection, 'tts:test') - def speak(self, msg): - if self.service == 'elevenlabs': - self.speak_elevenlabs(msg) + def discover_bluetooth_server(self): + """Try to discover the Bluetooth audio server using zeroconf""" + if not ZEROCONF_AVAILABLE: + pub.sendMessage('log:error', msg='[TTS] Cannot auto-discover: zeroconf not available') + self.init_bluetooth() # Try regular connection anyway + return + + found_servers = [] + + def on_service_state_change(zeroconf, service_type, name, 
state_change): + if state_change is ServiceStateChange.Added: + info = zeroconf.get_service_info(service_type, name) + if info: + addresses = [socket.inet_ntoa(addr) for addr in info.addresses] + if addresses: + server_data = { + 'address': addresses[0], + 'port': info.port, + 'name': info.server + } + found_servers.append(server_data) + pub.sendMessage('log', msg=f'[TTS] Discovered Bluetooth audio server: {server_data}') + + pub.sendMessage('log', msg='[TTS] Looking for Bluetooth audio servers...') + zeroconf = Zeroconf() + browser = ServiceBrowser(zeroconf, "_sentryaudio._tcp.local.", handlers=[on_service_state_change]) + + # Give some time for discovery + sleep(3) + zeroconf.close() + + if found_servers: + # Use the first server found + self.bluetooth_server = found_servers[0]['address'] + pub.sendMessage('log', msg=f'[TTS] Auto-selected Bluetooth server: {self.bluetooth_server}') + self.init_bluetooth() else: - self.speak_pyttsx3(msg) + # Fallback to local network scanning + self._scan_local_network() + + def _scan_local_network(self): + """Scan local network for potential Bluetooth audio servers""" + pub.sendMessage('log', msg='[TTS] Scanning local network for audio servers...') + + # Get local IP to determine network + local_ip = '' + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + local_ip = s.getsockname()[0] + s.close() + except: + local_ip = '127.0.0.1' + + # Extract network prefix + ip_parts = local_ip.split('.') + base_ip = f"{ip_parts[0]}.{ip_parts[1]}.{ip_parts[2]}." + + # Scan common IPs first + common_hosts = [ + '1', '2', '100', '101', '200', '254', # Common router/device IPs + '137.52' # The robot's IP subnet part + ] + + for host_suffix in common_hosts: + ip = f"{base_ip}{host_suffix}" + if self._check_audio_server(ip): + return - def init_pyttsx3(self): - engine = pyttsx3.init() - voices = engine.getProperty('voices') - #rate = engine.getProperty('rate') - #engine.setProperty('rate', rate+100) - #for i in voices: - engine.setProperty('voice', voices[10].id) - print('voice' + voices[10].id) - #engine.say('Hello, World!') - #engine.runAndWait() - self.engine = engine + # If not found, start a more comprehensive scan in background + threading.Thread(target=self._comprehensive_network_scan, args=(base_ip,), daemon=True).start() + + def _comprehensive_network_scan(self, base_ip): + """More comprehensive network scan in background""" + pub.sendMessage('log', msg='[TTS] Starting comprehensive network scan (this may take a while)...') + + # Scan the first 20 IPs in the network + for i in range(1, 21): + ip = f"{base_ip}{i}" + if self._check_audio_server(ip): + return + + pub.sendMessage('log:error', msg='[TTS] Could not find Bluetooth audio server. 
Please enter IP manually.') + # Initialize with pyttsx3 as fallback + self.protocol = 'direct' + self.init_pyttsx3() + + def _check_audio_server(self, ip): + """Check if an IP has the audio server running""" + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.5) + result = sock.connect_ex((ip, self.bluetooth_port)) + sock.close() + + if result == 0: + pub.sendMessage('log', msg=f'[TTS] Found audio server at {ip}') + self.bluetooth_server = ip + self.init_bluetooth() + return True + except: + pass + return False + + def speak(self, msg): + """Process a TTS request and route to appropriate method based on protocol""" + self.speaking = True + # Notify that speech is starting (for UI feedback) + pub.sendMessage('tts:speaking', message=msg) + + try: + if self.protocol == 'bluetooth': + self.speak_bluetooth(msg) + else: + if self.service == 'elevenlabs': + self.speak_elevenlabs(msg) + else: + self.speak_pyttsx3(msg) + except Exception as e: + pub.sendMessage('log:error', msg=f"[TTS] Speak error: {e}") + pub.sendMessage('tts:error', error_msg=str(e)) + + # If bluetooth fails, try to reconnect and fallback to local TTS for this message + if self.protocol == 'bluetooth': + pub.sendMessage('log', msg="[TTS] Bluetooth speech failed, falling back to local TTS") + threading.Thread(target=self.init_bluetooth, daemon=True).start() + + # Try local TTS as fallback + try: + if not hasattr(self, 'engine'): + self.init_pyttsx3() + self.speak_pyttsx3(msg) + except Exception as fallback_error: + pub.sendMessage('log:error', msg=f"[TTS] Fallback TTS error: {fallback_error}") + finally: + self.speaking = False + # Notify that speech has completed + pub.sendMessage('tts:complete') + + def init_pyttsx3(self, voice_index=0): + try: + engine = pyttsx3.init() + voices = engine.getProperty('voices') + + pub.sendMessage('log', msg=f"[TTS] Available voices: {len(voices)}") + if self.debug: + for i, voice in enumerate(voices): + pub.sendMessage('log', msg=f"[TTS] Voice {i}: {voice.name} ({voice.id})") + + if voice_index < len(voices): + engine.setProperty('voice', voices[voice_index].id) + pub.sendMessage('log', msg=f'[TTS] Using voice {voices[voice_index].id}') + else: + engine.setProperty('voice', voices[0].id) + pub.sendMessage('log', msg=f'[TTS] Voice index {voice_index} not found. Using default voice {voices[0].id}') + self.engine = engine + except RuntimeError as e: + if "eSpeak" in str(e): + pub.sendMessage('log:error', msg='[TTS] eSpeak not installed. Install with: sudo apt-get install espeak') + raise RuntimeError("eSpeak not installed. 
Please install it with: sudo apt-get install espeak") + else: + pub.sendMessage('log:error', msg='[TTS] Error initializing pyttsx3: ' + str(e)) + raise + except Exception as e: + pub.sendMessage('log:error', msg='[TTS] Error initializing pyttsx3: ' + str(e)) + raise def speak_pyttsx3(self, msg): - pub.sendMessage('log', msg="[TTS] {}".format(msg)) + pub.sendMessage('log', msg=f"[TTS] Speaking with pyttsx3: {msg[:50]}...") if self.translator is not None: msg = self.translator.request(msg) self.engine.say(msg) @@ -66,12 +255,16 @@ def init_elevenlabs(self, voice_id): self.voice_id = voice_id def speak_elevenlabs(self, msg): + pub.sendMessage('log', msg="[TTS] {}".format(msg)) + if self.translator is not None: + msg = self.translator.request(msg) + # This uses ElevenLabs, create an API key and export in your .bashrc file using `export ELEVENLABS_KEY=` before use output = self.client.text_to_speech.convert( voice_id=self.voice_id, optimize_streaming_latency="0", output_format="mp3_22050_32", - text="msg", + text=msg, # Fixed: Was using "msg" string literal instead of the variable voice_settings=VoiceSettings( stability=0.1, similarity_boost=0.3, @@ -80,6 +273,185 @@ def speak_elevenlabs(self, msg): ) play(output) + + def init_bluetooth(self): + """Initialize Bluetooth connection to laptop audio server""" + pub.sendMessage('log', msg="[TTS] Initializing Bluetooth connection...") + self.bt_connected = False + + # Close any existing connection + if self.bt_socket: + try: + self.bt_socket.close() + except: + pass + self.bt_socket = None + + threading.Thread(target=self._maintain_bt_connection, daemon=True).start() + + def _maintain_bt_connection(self): + """Maintain the Bluetooth connection in background thread""" + attempts = 0 + max_attempts = self.reconnect_attempts + + while attempts < max_attempts or max_attempts <= 0: + if not self.bt_connected: + try: + if not self.bluetooth_server: + pub.sendMessage('log:error', msg='[TTS] No Bluetooth server specified') + sleep(5) + attempts += 1 + continue + + pub.sendMessage('log', msg=f"[TTS] Connecting to {self.bluetooth_server}:{self.bluetooth_port}") + + # Close any existing socket + if self.bt_socket: + try: + self.bt_socket.close() + except: + pass + + self.bt_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.bt_socket.settimeout(5.0) # Set connection timeout + self.bt_socket.connect((self.bluetooth_server, self.bluetooth_port)) + self.bt_connected = True + attempts = 0 # Reset attempts counter on successful connection + pub.sendMessage('log', msg=f"[TTS] Connected to Bluetooth audio server at {self.bluetooth_server}") + + # Test the connection + try: + test_data = {'type': 'tts', 'text': 'Connection test'} + self.bt_socket.sendall(pickle.dumps(test_data)) + pub.sendMessage('log', msg=f"[TTS] Connection test successful") + # Notify for UI update + pub.sendMessage('tts:bluetooth_connected', server=self.bluetooth_server) + except Exception as e: + pub.sendMessage('log:error', msg=f"[TTS] Connection test failed: {e}") + self.bt_connected = False + continue + + except Exception as e: + pub.sendMessage('log', msg=f"[TTS] Failed to connect to Bluetooth audio server: {e}") + sleep(5) # Retry after 5 seconds + attempts += 1 + + # Reset socket timeout for normal operations + if self.bt_socket and self.bt_connected: + try: + self.bt_socket.settimeout(None) + except: + pass + + sleep(1) + + # If we exit the loop due to max attempts, fall back to local TTS + if not self.bt_connected: + pub.sendMessage('log', msg="[TTS] Max reconnection 
attempts reached. Falling back to local TTS.") + pub.sendMessage('tts:bluetooth_failed') + self.protocol = 'direct' + self.init_pyttsx3() + + def speak_bluetooth(self, msg): + """Send text to Bluetooth server for audio output""" + if not self.bt_connected or not self.bt_socket: + pub.sendMessage('log:error', msg="[TTS] Not connected to Bluetooth server. Falling back to pyttsx3.") + if not hasattr(self, 'engine'): + self.init_pyttsx3() + self.speak_pyttsx3(msg) + + # Try to reconnect in the background + pub.sendMessage('log', msg="[TTS] Attempting to reconnect to Bluetooth server...") + threading.Thread(target=self.init_bluetooth, daemon=True).start() + return + + try: + # Log the message we're sending (up to 50 chars) + pub.sendMessage('log', msg=f"[TTS Bluetooth] Sending: {msg[:50]}...") + if self.translator is not None: + msg = self.translator.request(msg) + + # Send TTS request to Bluetooth audio server + data = { + 'type': 'tts', + 'text': msg + } + + # Serialize the data with improved error handling + try: + pickled_data = pickle.dumps(data) + except Exception as pickle_error: + pub.sendMessage('log:error', msg=f"[TTS] Error serializing message: {pickle_error}") + raise + + # Set a timeout to ensure we don't hang indefinitely + self.bt_socket.settimeout(5.0) + + # Send data with better error handling + try: + pub.sendMessage('log', msg=f"[TTS] Sending {len(pickled_data)} bytes of data to Bluetooth server") + sent_bytes = 0 + total_bytes = len(pickled_data) + + # Loop until all data is sent or an error occurs + while sent_bytes < total_bytes: + bytes_sent = self.bt_socket.send(pickled_data[sent_bytes:]) + if bytes_sent == 0: + raise ConnectionError("Socket connection broken") + sent_bytes += bytes_sent + + pub.sendMessage('log', msg=f"[TTS] Data sent successfully: {sent_bytes} bytes") + + # Wait for acknowledgement from server (optional) + try: + self.bt_socket.settimeout(2.0) # Short timeout for ack + ack_data = self.bt_socket.recv(1024) + if ack_data: + try: + ack = pickle.loads(ack_data) + if ack.get('status') == 'ok': + pub.sendMessage('log', msg="[TTS] Server acknowledged TTS request") + except: + # If we can't parse the ack, just ignore it + pass + except socket.timeout: + # No ack received, but that's okay + pass + + except socket.timeout: + pub.sendMessage('log:error', msg="[TTS] Timeout while sending data") + raise ConnectionError("Send timeout") + except (BrokenPipeError, ConnectionResetError) as conn_error: + pub.sendMessage('log:error', msg=f"[TTS] Connection broken while sending: {conn_error}") + raise ConnectionError("Connection broken") + + # Reset timeout to default + self.bt_socket.settimeout(None) + except Exception as e: + pub.sendMessage('log:error', msg=f"[TTS] Bluetooth send error: {e}") + self.bt_connected = False + + # Try to reconnect + pub.sendMessage('log', msg="[TTS] Attempting to reconnect...") + threading.Thread(target=self.init_bluetooth, daemon=True).start() + + # Fall back to local TTS + if not hasattr(self, 'engine'): + self.init_pyttsx3() + self.speak_pyttsx3(msg) + + def test_connection(self): + """Test the TTS connection (used for diagnostics)""" + if self.protocol == 'bluetooth': + if self.bt_connected: + pub.sendMessage('log', msg=f"[TTS] Testing Bluetooth connection to {self.bluetooth_server}:{self.bluetooth_port}") + self.speak("Connection test successful") + else: + pub.sendMessage('log', msg="[TTS] Bluetooth not connected, attempting to connect...") + self.init_bluetooth() + else: + pub.sendMessage('log', msg=f"[TTS] Using local TTS 
({self.service})") + self.speak("Local TTS test") if __name__ == '__main__': tts = TTS() @@ -87,4 +459,7 @@ def speak_elevenlabs(self, msg): tts2 = TTS(service='elevenlabs', voice_id='pMsXgVXv3BLzUgSXRplE') tts2.speak('Test') - + + tts3 = TTS(protocol='bluetooth', bluetooth_server='127.0.0.1') + tts3.speak('Bluetooth test') + diff --git a/modules/behaviours/__init__.py b/modules/behaviours/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/modules/behaviours/faces.py b/modules/behaviours/faces.py index a6e7b564..4fd94e69 100644 --- a/modules/behaviours/faces.py +++ b/modules/behaviours/faces.py @@ -1,7 +1,8 @@ from random import randint, randrange from pubsub import pub from datetime import datetime, timedelta - +import os +import json from modules.config import Config class Faces: @@ -11,24 +12,136 @@ def __init__(self, state): self.last_face_name = None self.current_faces = [] self.face_detected = None + self.priority_animations = {} # Öncelikli kişiler için animasyonlar + self.priority_file = "priority_animations.json" # Konfigürasyon dosyası + + # Abonelikler pub.subscribe(self.face, 'vision:detect:face') pub.subscribe(self.noface, 'vision:nomatch') + pub.subscribe(self.update_priority_animation, 'priority:update_animation') + pub.subscribe(self.handle_priority_person_detected, 'priority:person_detected') + pub.subscribe(self.add_priority_person, 'priority:add_person') + pub.subscribe(self.remove_priority_person, 'priority:remove_person') + + # Kayıtlı öncelikli kişileri yükle + self.load_priority_animations() + + # Sınıfın aktif olduğunu logla + pub.sendMessage('log:info', msg=f"[Faces] Behaviors initialized with {len(self.priority_animations)} priority animations") + + def load_priority_animations(self): + """Kayıtlı öncelikli kişi animasyonlarını yükle""" + try: + if os.path.exists(self.priority_file): + with open(self.priority_file, 'r') as f: + self.priority_animations = json.load(f) + pub.sendMessage('log:info', msg=f"[Faces] Loaded {len(self.priority_animations)} priority animations") + else: + pub.sendMessage('log:info', msg=f"[Faces] No priority file found, creating new one") + self.priority_animations = {} + # Test için varsayılan bir kişi ekle + self.priority_animations["Test"] = "RAINBOW" + self.save_priority_animations() + except Exception as e: + pub.sendMessage('log:error', msg=f"[Faces] Error loading priority animations: {e}") + self.priority_animations = {} + + def save_priority_animations(self): + """Öncelikli kişi animasyonlarını kaydet""" + try: + with open(self.priority_file, 'w') as f: + json.dump(self.priority_animations, f, indent=2) + pub.sendMessage('log:info', msg=f"[Faces] Saved {len(self.priority_animations)} priority animations") + except Exception as e: + pub.sendMessage('log:error', msg=f"[Faces] Error saving priority animations: {e}") + + def add_priority_person(self, person_name): + """Öncelikli kişi ekle""" + if person_name not in self.priority_animations: + self.priority_animations[person_name] = "RAINBOW_CYCLE" # Varsayılan animasyon + pub.sendMessage('log:info', msg=f"[Faces] Added priority person: {person_name}") + self.save_priority_animations() + return True + return False + + def remove_priority_person(self, person_name): + """Öncelikli kişiyi kaldır""" + if person_name in self.priority_animations: + del self.priority_animations[person_name] + pub.sendMessage('log:info', msg=f"[Faces] Removed priority person: {person_name}") + self.save_priority_animations() + return True + return False + + def 
update_priority_animation(self, person_name, animation_name): + """GUI'den öncelikli kişi-animasyon eşleştirmesi güncelleme""" + self.priority_animations[person_name] = animation_name + pub.sendMessage('log:info', msg=f"[Faces] Updated animation for {person_name}: {animation_name}") + self.save_priority_animations() + + def handle_priority_person_detected(self, person_name, animation_name="RAINBOW_CYCLE"): + """Öncelikli kişi algılandığında çağrılır""" + pub.sendMessage('log:info', msg=f"[Faces] Priority person detected: {person_name}, animation: {animation_name}") + + # Kişi öncelikli değilse ekle + if person_name not in self.priority_animations: + self.add_priority_person(person_name) + # Animasyonu güncelle + self.update_priority_animation(person_name, animation_name) + + # Animasyonu çalıştır + pub.sendMessage('led:animate', animation=animation_name, color="GREEN", repeat=2) + + # Göz rengini yeşil yap + self.state.set_eye('green') + + # Selam ver + greeting = f"Merhaba {person_name}" + pub.sendMessage('tts', msg=greeting) def noface(self): + """Yüz algılanmadığında çağrılır""" if self.face_detected: - pub.sendMessage('log:info', msg='[Personality] No face matches found') + pub.sendMessage('log:info', msg='[Faces] No face matches found') self.face_detected = False + + # Göz rengini mavi yap (resting) + self.state.set_eye('blue') def face(self, name): + """Yüz algılandığında çağrılır (vision:detect:face olayıyla)""" if not self.face_detected: - pub.sendMessage('log:info', msg='[Personality] Face detected: ' + str(name)) + pub.sendMessage('log:info', msg=f'[Faces] Face detected: {name}') + self.face_detected = True self.last_face = datetime.now() - # self.state.set_state(Config.STATE_IDLE) + + # Göz rengini yeşil yap self.state.set_eye('green') + + # Öncelikli kişi için animasyon kontrolü + is_priority = name in self.priority_animations + if is_priority: + animation_name = self.priority_animations[name] + pub.sendMessage('log:info', msg=f'[Faces] Running priority animation for {name}: {animation_name}') + # Animasyonu çalıştır + pub.sendMessage('led:animate', animation=animation_name, color="GREEN", repeat=2) + + # Servo için takip etme komutunu da gönder + pub.sendMessage('priority_person:detected', name=name) + + # Yeni bir kişi algılandıysa, karşılama yap if name not in self.current_faces: self.current_faces.append(name) - #pub.sendMessage('speak', msg='hi') - pub.sendMessage('tts', msg='hello there') - if name != 'unknown': - self.last_face_name = name \ No newline at end of file + + # Öncelikli kişi değilse varsayılan animasyon + if not is_priority: + pub.sendMessage('led:animate', animation='SPINNER', color='BLUE', repeat=1) + + # Selamlama + if name.lower() != 'unknown': + greeting = f"Merhaba {name}" + pub.sendMessage('tts', msg=greeting) + + if name.lower() != 'unknown': + self.last_face_name = name diff --git a/modules/behaviours/feel.py b/modules/behaviours/feel.py index 86d2ca72..2bf38935 100644 --- a/modules/behaviours/feel.py +++ b/modules/behaviours/feel.py @@ -56,22 +56,38 @@ def loop_minute(self): pub.sendMessage('led', identifiers='status3', color=self.attention, gradient='bg') pub.sendMessage('led', identifiers='status4', color=self.happiness, gradient='bg') + # Feel sınıfına yeni metodlar ekleyin + def get_feelings(self): feelings = [] if self.attention > 90 and self.wakefulness > 90: feelings.append('excited') + # Heyecan durumunda RAINBOW animasyonu + pub.sendMessage('led:animate', animation='RAINBOW_CYCLE', repeat=1) if self.happiness < 10: feelings.append('sad') + 
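Everything in the `Faces` behaviour above is driven over pubsub, so other modules (or a GUI) can manage the priority list without touching the class directly. A short usage sketch, with an illustrative person name; the topics and keyword arguments match the subscriptions in `Faces.__init__`:

```
from pubsub import pub

# Register a person and give them a custom animation.
pub.sendMessage('priority:add_person', person_name='Alice')
pub.sendMessage('priority:update_animation', person_name='Alice',
                animation_name='RAINBOW_CYCLE')

# Simulate a detection: runs the stored animation, turns the eyes green
# and greets the person via the 'tts' topic.
pub.sendMessage('priority:person_detected', person_name='Alice')
```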
diff --git a/modules/behaviours/feel.py b/modules/behaviours/feel.py
index 86d2ca72..2bf38935 100644
--- a/modules/behaviours/feel.py
+++ b/modules/behaviours/feel.py
@@ -56,22 +56,38 @@ def loop_minute(self):
         pub.sendMessage('led', identifiers='status3', color=self.attention, gradient='bg')
         pub.sendMessage('led', identifiers='status4', color=self.happiness, gradient='bg')
 
+    # New animation hooks added to the Feel class
+
     def get_feelings(self):
         feelings = []
         if self.attention > 90 and self.wakefulness > 90:
             feelings.append('excited')
+            # RAINBOW animation when excited
+            pub.sendMessage('led:animate', animation='RAINBOW_CYCLE', repeat=1)
         if self.happiness < 10:
             feelings.append('sad')
+            # Blue breathe animation when sad
+            pub.sendMessage('led:animate', animation='BREATHE', color='BLUE', repeat=1)
         if self.attention < 30:
             feelings.append('bored')
+            # STACKED_BARS animation when bored
+            pub.sendMessage('led:animate', animation='STACKED_BARS', repeat=1)
         if self.wakefulness < 20:
             feelings.append('tired')
+            # PULSE animation when tired
+            pub.sendMessage('led:animate', animation='PULSE', color='PURPLE', repeat=1)
         if self.wakefulness < 0:
             feelings.append('asleep')
+            # WAVE animation when asleep (slow and calm)
+            pub.sendMessage('led:animate', animation='WAVE', repeat=1)
         if self.contentment < 20:
             feelings.append('restless')
+            # METEOR animation when restless
+            pub.sendMessage('led:animate', animation='METEOR', color='ORANGE', repeat=1)
         if len(feelings) == 0:
             feelings.append('ok')
+            # Calm BREATHE animation for the normal state
+            pub.sendMessage('led:animate', animation='BREATHE', color='GREEN', repeat=1)
         return feelings
 
     def input(self, input_type):
@@ -132,4 +148,4 @@ def puppet(self):
     # self.input(Feel.INPUT_TYPE_COMPANY)
     #
     # def face(self, name):
-    #     self.input(Feel.INPUT_TYPE_INTERESTING)
\ No newline at end of file
+    #     self.input(Feel.INPUT_TYPE_INTERESTING)
diff --git a/modules/behaviours/objects.py b/modules/behaviours/objects.py
index fab94352..39540db3 100644
--- a/modules/behaviours/objects.py
+++ b/modules/behaviours/objects.py
@@ -1,6 +1,8 @@
 from random import randint, randrange
 from pubsub import pub
 from datetime import datetime, timedelta
+import json
+import os
 
 from modules.config import Config
 
@@ -9,8 +11,43 @@ def __init__(self, state):
         self.state = state  # the personality instance
         self.last_detection = None
         self.is_detected = None
+        self.priority_objects = ['person', 'face', 'phone', 'laptop']  # List of priority objects
+        self.object_colors = {
+            'person': 'blue',
+            'face': 'purple',
+            'phone': 'yellow',
+            'laptop': 'cyan',
+            'book': 'white',
+            'bottle': 'red'
+        }
         pub.subscribe(self.object, 'vision:detect:object')
         pub.subscribe(self.noobject, 'vision:nomatch')
+        pub.subscribe(self.update_priority_objects, 'update:priority_objects')
+
+    def update_priority_objects(self, priority_objects=None, object_colors=None):
+        """Update the priority objects"""
+        if priority_objects:
+            self.priority_objects = priority_objects
+        if object_colors:
+            self.object_colors = object_colors
+        pub.sendMessage('log:info', msg=f'[Personality] Updated priority objects: {len(self.priority_objects)} objects')
+
+    def is_priority_object(self, name):
+        """Check whether an object is a priority object"""
+        return name in self.priority_objects
+
+    def get_color_for_object(self, name):
+        """Return the LED color for an object"""
+        # Use a custom color if one is defined for this object
+        if name in self.object_colors:
+            return self.object_colors[name]
+
+        # Default color for priority objects
+        if self.is_priority_object(name):
+            return 'purple'
+
+        # For all other objects
+        return 'purple'  # Default color
 
     def noobject(self):
         if self.is_detected:
@@ -23,4 +60,13 @@ def object(self, name):
         self.is_detected = True
         self.last_detection = datetime.now()
         # self.state.set_state(Config.STATE_IDLE)
-        self.state.set_eye('purple')
+
+        # Work out the LED color for the object
+        led_color = self.get_color_for_object(name)
+
+        # Log when a priority object is detected
+        if self.is_priority_object(name):
+            pub.sendMessage('log:info', msg=f'[Personality] Priority object detected: {name}, color: {led_color}')
+
+        # Update the eye color
+        self.state.set_eye(led_color)
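The priority-object list is likewise reconfigurable at runtime over pubsub; both arguments are optional, as the `update_priority_objects` signature above shows. For example (the object names and colors here are illustrative):

```
from pubsub import pub

pub.sendMessage('update:priority_objects',
                priority_objects=['person', 'cat', 'phone'],
                object_colors={'person': 'blue', 'cat': 'orange'})
```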
diff --git a/modules/behaviours/respond.py b/modules/behaviours/respond.py
index 1bd234ae..8c5beade 100644
--- a/modules/behaviours/respond.py
+++ b/modules/behaviours/respond.py
@@ -13,22 +13,28 @@ def __init__(self, state):
     def speech(self, text):
         if self.state.is_resting():
             return
-
+
+        # Show an LED animation when speech is detected
+        pub.sendMessage('led:animate', animation='PULSE', color='CYAN', repeat=1)
+
         action = None
         if 'are you sure' in text:
             action = 'head_nod'
+            # Different LED animation when a confirmation question is asked
+            pub.sendMessage('led:animate', animation='ALTERNATING', color='YELLOW', color2='WHITE', repeat=2)
         if 'you like' in text:
             actions = ['head_shake', 'head_nod', 'speak']
-            action = actions[abs(hash(text.split('like ')[1])) % len(actions)-1] # choose from the number of actions by hashing the item, so the answer is always the same
-            # action = actions[randrange(len(actions) - 1)]
-
+            # Choose by hashing the item so the same item maps to the same action.
+            # Note: str hashes are salted per process, so the mapping is only stable
+            # within a single run unless PYTHONHASHSEED is fixed.
+            action = actions[abs(hash(text.split('like ')[1])) % len(actions)-1]
+            # Special animation when asked about likes
+            pub.sendMessage('led:animate', animation='COLOR_WIPE', color='PINK', repeat=1)
+
         if action:
             pub.sendMessage('log', msg='[Personality] Respond action: ' + str(action))
-            if action is 'speak':
+            if action == 'speak':
                 pub.sendMessage('speak', msg=text)
             else:
                 pub.sendMessage('animate', action=action)
-
+
     def tracking(self, largest, screen):
         """
         Show the position of the largest match in the eye LEDs
@@ -45,4 +51,4 @@ def tracking(self, largest, screen):
             pub.sendMessage('led', identifiers='left', color='green')
         else:
             pub.sendMessage('led', identifiers=['left', 'right'], color='off')
-            pub.sendMessage('led', identifiers='middle', color='green')
\ No newline at end of file
+            pub.sendMessage('led', identifiers='middle', color='green')
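Both the `Buzzer` above and the `NeoPx` module below talk to the ESP32 over the same newline-terminated serial command protocol: a `PING`/`PONG` handshake to verify the link, then plain-text commands such as `BUZZER <pin> <frequency> <duration>`. A bare-bones sketch of that exchange, assuming the default port and baudrate used throughout this patch (the pin number is illustrative):

```
import serial
from time import sleep

ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
sleep(1.0)                        # give the board time to settle after opening the port

ser.write(b'PING\n')              # handshake used by NeoPx to verify the link
print(ser.readline())             # firmware is expected to answer with PONG

ser.write(b'BUZZER 5 440 0.5\n')  # pin 5, 440 Hz, 0.5 s - format from Buzzer.send_buzzer_command
ser.close()
```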
diff --git a/modules/neopixel/neopx.py b/modules/neopixel/neopx.py
index 807a3fa8..4d1b4f34 100644
--- a/modules/neopixel/neopx.py
+++ b/modules/neopixel/neopx.py
@@ -1,10 +1,22 @@
 from pubsub import pub
 from time import sleep
+import time  # Add this import for time.time()
 from colour import Color
 import board
-from modules.network.arduinoserial import set_led_pin
-
 import threading
+import sys
+import os
+import json
+import paho.mqtt.client as mqtt
+
+# Add path to SerialShareManager
+if not hasattr(sys, "_called_from_test") and not (hasattr(sys, "argv") and "pytest" in sys.argv[0]):
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
+
+# Global variables for shared ESP32 serial connection
+_esp32_shared_serial = None
+_esp32_serial_initialized = False
+_esp32_serial_lock = threading.RLock()  # Add a lock for thread safety
 
 class NeoPx:
     COLOR_OFF = (0, 0, 0)
@@ -30,6 +42,30 @@ class NeoPx:
         'white_dim': COLOR_WHITE_DIM
     }
 
+    # Add named colors from Arduino for ESP32 compatibility
+    ARDUINO_COLOR_MAP = {
+        'RED': (255, 0, 0),
+        'GREEN': (0, 255, 0),
+        'BLUE': (0, 0, 255),
+        'YELLOW': (255, 255, 0),
+        'PURPLE': (255, 0, 255),
+        'CYAN': (0, 255, 255),
+        'WHITE': (255, 255, 255),
+        'ORANGE': (255, 165, 0),
+        'PINK': (255, 105, 180),
+        'GOLD': (255, 215, 0),
+        'TEAL': (0, 128, 128),
+        'MAGENTA': (255, 0, 127),
+        'LIME': (50, 205, 50),
+        'SKY_BLUE': (135, 206, 235),
+        'NAVY': (0, 0, 128),
+        'MAROON': (128, 0, 0),
+        'AQUA': (127, 255, 212),
+        'VIOLET': (138, 43, 226),
+        'CORAL': (255, 127, 80),
+        'TURQUOISE': (64, 224, 208)
+    }
+
     def __init__(self, **kwargs):
         """
         NeoPx class
@@ -65,6 +101,12 @@ def __init__(self, **kwargs):
 
         Subscribes to 'speech' to handle speech commands
         - Argument: msg (string) - speech command
+
+        Subscribes to 'led:animate' to trigger animations (when ESP32 protocol is used)
+        - Argument: animation (string) - animation name
+        - Argument: color (string or tuple) - optional color
+        - Argument: color2 (string or tuple) - optional second color for animations that need it
+        - Argument: repeat (int) - optional repeat count
 
         Example:
         pub.sendMessage('led', identifiers=1, color='red')
@@ -76,7 +118,12 @@ def __init__(self, **kwargs):
         pub.sendMessage('led:party')
         pub.sendMessage('exit')
         pub.sendMessage('speech', msg='light on')
+        pub.sendMessage('led:animate', animation='RAINBOW', repeat=3)
+        pub.sendMessage('led:animate', animation='COLOR_WIPE', color='RED', repeat=2)
+        pub.sendMessage('led:animate', animation='ALTERNATING', color='RED', color2='BLUE', repeat=5)
         """
+
+        # Initialise
         self.count = kwargs.get('count')
         self.positions = kwargs.get('positions')
@@ -88,7 +135,127 @@ def __init__(self, **kwargs):
         self.animation = False
         self.thread = None
         self.overridden = False  # prevent any further changes until released (for flashlight)
-        if kwargs.get('i2c'):
+
+        self.verbose_logging = kwargs.get('verbose_logging', False)  # Add logging control
+        self.current_processing_mode = 'none'  # Track current mode
+        self.use_mqtt = kwargs.get('mqtt', False)
+        if self.use_mqtt:
+            self.protocol = 'MQTT'
+            self._init_mqtt()
+        else:
+            # The protocol parameter is required and must be valid
+            self.protocol = kwargs.get('protocol', None)
+            accepted_protocols = ['I2C', 'SPI', 'ESP32', 'GPIO']
+            if self.protocol not in accepted_protocols:
+                raise ValueError("Invalid or missing protocol specified. Choose one of: " + ", ".join(accepted_protocols))
+
+        self.handle_processing_mode_change({'mode': self.current_processing_mode})
+        # Initialize shared serial connection if using ESP32 protocol
+        global _esp32_shared_serial, _esp32_serial_initialized, _esp32_serial_lock
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            import serial
+            from modules.SerialShareManager import register_serial, send_command
+            # Open USB serial port (configured via kwargs)
+            with _esp32_serial_lock:
+                if _esp32_shared_serial is None:
+                    try:
+                        serial_port = kwargs.get('serial_port', '/dev/ttyUSB0')
+                        baudrate = kwargs.get('baudrate', 115200)
+                        pub.sendMessage('log', msg=f'[NEOPX] Connecting to ESP32 on {serial_port} at {baudrate} baud')
+
+                        _esp32_shared_serial = serial.Serial(
+                            serial_port,
+                            baudrate,
+                            timeout=1
+                        )
+
+                        # Flush any pending data
+                        while _esp32_shared_serial.in_waiting > 0:
+                            _esp32_shared_serial.read(_esp32_shared_serial.in_waiting)
+
+                        # Wait a moment for the ESP32 to stabilize
+                        sleep(1.0)  # Increased from 0.5 to ensure ESP8266 is ready
+
+                        # Send a PING to check if the connection is working
+                        _esp32_shared_serial.write(b"PING\n")
+                        sleep(0.2)  # Increased from 0.1 to ensure response is received
+
+                        # Read with timeout to prevent hanging
+                        response = ""
+                        start_time = time.time()
+                        while time.time() - start_time < 2.0:  # Increased timeout to 2 seconds
+                            if _esp32_shared_serial.in_waiting > 0:
+                                try:
+                                    response += _esp32_shared_serial.readline().decode(errors='ignore').strip()
+                                except UnicodeDecodeError:
+                                    # Skip invalid characters
+                                    response += _esp32_shared_serial.readline().decode(errors='replace').strip()
+                            if "PONG" in response:
+                                break
+
+                        if "PONG" in response:
+                            if self.verbose_logging:
+                                pub.sendMessage('log', msg=f'[NEOPX] ESP32 connection established and verified')
+                            _esp32_serial_initialized = True
+
+                            # Register with SerialShareManager
+                            register_serial(_esp32_shared_serial, 'neopx')
+                            if self.verbose_logging:
+                                pub.sendMessage('log', msg=f'[NEOPX] Registered serial connection with SerialShareManager')
+                        else:
+                            if self.verbose_logging:
+                                pub.sendMessage('log', msg=f'[NEOPX] ESP32 connection established but response was unexpected: {response}')
+                            # Try one more time
+                            _esp32_shared_serial.write(b"PING\n")
+                            sleep(0.5)
+                            if _esp32_shared_serial.in_waiting > 0:
+                                try:
+                                    if "PONG" in _esp32_shared_serial.readline().decode(errors='ignore').strip():
+                                        _esp32_serial_initialized = True
+                                        if self.verbose_logging:
+                                            pub.sendMessage('log', msg=f'[NEOPX] ESP32 connection verified on second attempt')
+
+                                        # Register with SerialShareManager
+                                        register_serial(_esp32_shared_serial, 'neopx')
+                                        if self.verbose_logging:
+                                            pub.sendMessage('log', msg=f'[NEOPX] Registered serial connection with SerialShareManager')
+                                except UnicodeDecodeError:
+                                    pass
+
+                    except Exception as e:
+                        pub.sendMessage('log', msg=f'[NEOPX] ERROR: Failed to connect to ESP32: {e}')
+                        raise
+
+            self.serial = _esp32_shared_serial
+
+            # Start a thread to process incoming ESP32 messages
+            self.esp32_thread = threading.Thread(target=self.process_esp32_messages)
+            self.esp32_thread.daemon = True
+            self.esp32_thread.start()
+
+            # Now announce that serial is ready for other modules - with longer delay
+            sleep(1.5)
+            _esp32_serial_lock.acquire()  # Lock to ensure thread safety when accessing global
+            if _esp32_serial_initialized:
+                _esp32_serial_lock.release()
+                pub.sendMessage('esp32:serial_ready')
+                pub.sendMessage('esp32:serial_global_ready')  # Add this broadcast too
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg=f'[NEOPX] Shared ESP32 serial connection is ready for other modules')
+            else:
+                _esp32_serial_lock.release()
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg=f'[NEOPX] Serial connection not properly initialized, not broadcasting ready event')
+
+            # Only send test animation if this is the first initialization
+            if not hasattr(self, 'test_animation_sent'):
+                self.test_animation_sent = True
+                self.send_animation_command('METEOR', color='WHITE')
+                sleep(3)
+                self.send_animation_command('STACKED_BARS')
+        elif not self.use_mqtt and self.protocol == 'I2C':
+            import busio
             from rainbowio import colorwheel
             from adafruit_seesaw import seesaw, neopixel
@@ -102,9 +269,40 @@ def __init__(self, **kwargs):
             ss = seesaw.Seesaw(self.i2c, addr=0x60)
             neo_pin = 15  # Unclear how this is used
             self.pixels = neopixel.NeoPixel(ss, neo_pin, self.count, brightness=0.1)
-        else:
+        elif not self.use_mqtt and self.protocol == 'SPI':
+            import neopixel_spi as neopixel
+            spi = board.SPI()
+            self.pixels = neopixel.NeoPixel_SPI(spi, self.count, brightness=0.1, auto_write=False, pixel_order=neopixel.GRB)
+
+            DELAY = 3
+            print("All neopixels OFF")
+            self.pixels.fill((0, 0, 0))
+            self.pixels.show()
+            sleep(DELAY)
+
+            print("First neopixel red, last neopixel blue")
+            self.pixels[0] = (10, 0, 0)
+            self.pixels[self.count - 1] = (0, 0, 10)
+            self.pixels.show()
+            sleep(DELAY)
+
+            print("All " + str(self.count) + " neopixels green")
+            self.pixels.fill((0, 10, 0))
+            self.pixels.show()
+            sleep(DELAY)
+
+            print("All neopixels OFF")
+            self.pixels.fill((0, 0, 0))
+            self.pixels.show()
+            sleep(DELAY)
+
+            print("End of test")
+        elif not self.use_mqtt and self.protocol == 'GPIO':
             import neopixel
-            self.pixels = neopixel.NeoPixel(board.D12, self.count)
+            self.pixels = neopixel.NeoPixel(kwargs.get('pin'), self.count)
         # Default states
         self.set(self.all, NeoPx.COLOR_OFF)
         sleep(0.1)
@@ -120,6 +318,146 @@ def __init__(self, **kwargs):
         pub.subscribe(self.party, 'led:party')
         pub.subscribe(self.exit, 'exit')
         pub.subscribe(self.speech, 'speech')
+        pub.subscribe(self.handle_animate_pubsub, 'led:animate')  # renamed from handle_animate; subscribe once to avoid double-firing
+        pub.subscribe(self.handle_motion_event_pubsub, 'motion_event')  # Subscribe to motion events from GUI/CommandReceiver
+        pub.subscribe(self.handle_processing_mode_change, 'set_processing_mode')  # Subscribe to mode changes
+        pub.subscribe(self.handle_audio_event_pubsub, 'tts:speaking')  # Subscribe to TTS start
+        pub.subscribe(self.handle_audio_event_pubsub, 'tts:complete')  # Subscribe to TTS end
+        # Add other subscriptions if needed (e.g., speech recognition states)
+        # pub.subscribe(self.handle_speech_event_pubsub, 'speech:listening')
+        # pub.subscribe(self.handle_speech_event_pubsub, 'speech:recognized')
+
+        # Test animation (connection check)
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            pub.sendMessage('log:info', msg=f"[NEOPX] ESP32 connection established, starting test animation")
+            self.send_animation_command('METEOR', color='WHITE', repeat=1)
+            sleep(2)
+            self.send_animation_command('STACKED_BARS', repeat=1)
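When the module is constructed with `mqtt=True`, the same commands can be published to a broker instead of pubsub; `on_mqtt_message` below decodes a JSON payload whose keys mirror the pubsub keyword arguments. A minimal sketch using paho-mqtt's one-shot publish helper, assuming a broker on localhost:1883 as in `_init_mqtt` (the animation values are illustrative):

```
import json
import paho.mqtt.publish as publish

# Payload keys match what on_mqtt_message expects for the 'led:animate' topic.
publish.single('led:animate',
               json.dumps({'animation': 'RAINBOW_CYCLE', 'color': 'GREEN', 'repeat': 2}),
               hostname='localhost')
```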
        pub.subscribe(self.exit, 'exit')
        pub.subscribe(self.speech, 'speech')
+        pub.subscribe(self.handle_animate_pubsub, 'led:animate')  # Renamed for clarity; subscribed once so each request is only handled once
+        pub.subscribe(self.handle_motion_event_pubsub, 'motion_event')  # Subscribe to motion events from GUI/CommandReceiver
+        pub.subscribe(self.handle_processing_mode_change, 'set_processing_mode')  # Subscribe to mode changes
+        pub.subscribe(self.handle_audio_event_pubsub, 'tts:speaking')  # Subscribe to TTS start
+        pub.subscribe(self.handle_audio_event_pubsub, 'tts:complete')  # Subscribe to TTS end
+        # Add other subscriptions if needed (e.g., speech recognition states)
+        # pub.subscribe(self.handle_speech_event_pubsub, 'speech:listening')
+        # pub.subscribe(self.handle_speech_event_pubsub, 'speech:recognized')
+
+        # Test animation (connection check)
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            pub.sendMessage('log:info', msg="[NEOPX] ESP32 connection established, starting test animation")
+            self.send_animation_command('METEOR', color='WHITE', repeat=1)
+            sleep(2)
+            self.send_animation_command('STACKED_BARS', repeat=1)
+
+    # Renamed from handle_animate for clarity to distinguish from ESP32 method names
+    def handle_animate_pubsub(self, animation, color=None, color2=None, repeat=1):
+        """ Handle animation requests from pubsub """
+        pub.sendMessage('log:info', msg=f"[NeoPx] PubSub animate request: animation={animation}, color={color}, color2={color2}, repeat={repeat}")
+
+        if self.protocol == 'ESP32':
+            # Add more detailed debug info
+            pub.sendMessage('log:info', msg=f"[NeoPx] Sending ESP32 animation command: {animation}")
+            # Directly call send_animation_command
+            self.send_animation_command(animation, color, color2, repeat)
+        else:
+            # Fall back to standard animation system (if you have one)
+            pub.sendMessage('log:warning', msg="[NeoPx] Non-ESP32 animation not fully implemented in handle_animate_pubsub")
+            # color_val = color if color else NeoPx.COLOR_RED  # Example fallback
+            # self.animate(self.all, color_val, animation.lower())
+
+    def _init_mqtt(self):
+        try:
+            self.mqtt_client = mqtt.Client()
+            self.mqtt_client.on_connect = self.on_mqtt_connect
+            self.mqtt_client.on_message = self.on_mqtt_message
+            self.mqtt_client.connect("localhost", 1883, 60)  # Change the broker IP/port here if needed
+            self.mqtt_client.loop_start()
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[MQTT] Error initializing MQTT client: {e}")
+
+    def on_mqtt_connect(self, client, userdata, flags, rc):
+        client.subscribe("led")
+        client.subscribe("led:full")
+        client.subscribe("led:animate")
+
+    def on_mqtt_message(self, client, userdata, msg):
+        try:
+            payload = json.loads(msg.payload.decode())
+            topic = msg.topic
+
+            if topic == 'led':
+                self.set(payload.get('identifiers'), payload.get('color'))
+            elif topic == 'led:full':
+                self.full(payload.get('color'))
+            elif topic == 'led:animate':
+                self.handle_animate(
+                    payload.get('animation'),
+                    color=payload.get('color'),
+                    color2=payload.get('color2'),
+                    repeat=payload.get('repeat', 1)
+                )
+        except Exception as e:
+            print(f"[MQTT] Error handling message: {e}")
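+
+    # Example MQTT payloads for the "sentrybot/cmd" topic, as constructed in set() and
+    # send_animation_command() below (broker/topic names are this project's defaults):
+    #   {"cmd": "SET", "index": 3, "color": "red"}
+    #   {"cmd": "ANIMATE", "animation": "RAINBOW", "color": "RED", "color2": null, "repeat": 3}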
+
+    def process_esp32_messages(self):
+        """
+        Process incoming messages from ESP32
+        This runs in a separate thread and dispatches events based on message type
+        """
+        if not hasattr(self, 'serial') or self.protocol != 'ESP32' or self.use_mqtt:
+            return
+
+        while getattr(threading.current_thread(), "running", True):  # Check if the thread is still running
+            try:
+                if self.serial.in_waiting > 0:
+                    try:
+                        data = self.serial.readline().decode(errors='ignore').strip()
+                    except UnicodeDecodeError:
+                        data = ''
+
+                    # Ignore common noise patterns
+                    if data == "or 2." or data == "":
+                        continue
+
+                    # Dispatch messages based on prefix
+                    if data.startswith("RFID_CARD"):
+                        parts = data.split()
+                        if len(parts) >= 2:
+                            card_id = parts[1]
+                            pub.sendMessage('rfid:card_raw', card_id=card_id)
+                            if self.verbose_logging:
+                                pub.sendMessage('log', msg=f'[RFID] Card detected: {card_id}')
+
+                    elif data.startswith("MOTION_DETECTED"):
+                        parts = data.split()
+                        if len(parts) >= 2:
+                            try:
+                                pin = int(parts[1])
+                                pub.sendMessage('motion:detected', pin=pin)
+                                if self.verbose_logging:
+                                    pub.sendMessage('log', msg=f'[MOTION] Motion detected on pin {pin}')
+                            except ValueError:
+                                pub.sendMessage('log', msg=f'[MOTION] Invalid pin format in message: {data}')
+
+                    elif data.startswith("PONG"):
+                        # Handled in main thread
+                        pass
+                    else:
+                        # Only log other messages if they're important (errors, etc)
+                        if len(data) > 3 and not data.isspace() and ("ERROR" in data or "WARN" in data):
+                            pub.sendMessage('log', msg=f'[ESP32] {data}')
+
+                sleep(0.01)  # Small delay to prevent CPU hogging
+
+            except Exception as e:
+                pub.sendMessage('log', msg=f'[ESP32] Error processing message: {e}')
+                sleep(1)  # Longer delay on error
+
+    def stop_esp32_thread(self):
+        """
+        Stop the ESP32 message processing thread gracefully
+        """
+        if self.use_mqtt:
+            return
+        if hasattr(self, 'esp32_thread') and self.esp32_thread.is_alive():
+            self.esp32_thread.running = False
+            self.esp32_thread.join()
+
+    def get_shared_serial(self):
+        """
+        Get the shared serial connection for other modules to use
+        """
+        if self.use_mqtt:
+            return None
+        if self.protocol == 'ESP32' and hasattr(self, 'serial'):
+            return self.serial
+        return None

    def exit(self):
        """
@@ -127,9 +465,14 @@ def exit(self):
        """
        if self.animation:
            self.animation = False
-            self.thread.join()
+            if self.thread:
+                self.thread.join()
        self.set(self.all, NeoPx.COLOR_OFF)
-        self.i2c.deinit()
+
+        self.stop_esp32_thread()
+        if self.protocol == 'I2C':
+            self.i2c.deinit()
+        # Don't close serial here, as it's shared
        sleep(1)

    def speech(self, text):
@@ -138,6 +481,24 @@ def speech(self, text):
        if 'light off' in text:
            self.flashlight(False)

+    def color_to_arduino_format(self, color):
+        """Convert color to a format understood by Arduino code"""
+        # If color is a string, check if it's in our Arduino color map
+        if isinstance(color, str):
+            # First try Arduino color map (upper case names)
+            if color.upper() in self.ARDUINO_COLOR_MAP:
+                return color.upper()
+            # Then try our standard color map
+            elif color in self.COLOR_MAP:
+                # Return the RGB values directly
+                rgb = self.COLOR_MAP[color]
+                return f"{rgb[0]},{rgb[1]},{rgb[2]}"
+        # If it's already an RGB tuple
+        elif isinstance(color, tuple) and len(color) == 3:
+            return f"{color[0]},{color[1]},{color[2]}"
+        # Default to empty string if no valid color format found
+        return ""
+
    def set(self, identifiers, color, gradient=False):
        """
        Set color of pixel
@@ -147,13 +508,22 @@ def set(self, identifiers, color, gradient=False):
        :param identifiers: pixel number (starting from 0) - can be list
        :param color: string map of COLOR_MAP or tuple (R, G, B)
        """
-        if self.overridden:
-            return
-        # convert single identifier to list
-        if type(identifiers) is int:
+        # Always make identifiers iterable
+        if isinstance(identifiers, int):
            identifiers = [identifiers]
-        elif type(identifiers) is str:
+        elif isinstance(identifiers, str):
            identifiers = [self.positions[identifiers]]
+        if self.use_mqtt:
+            for i in identifiers:
+                payload = {
+                    "cmd": "SET",
+                    "index": i,
+                    "color": color
+                }
+                self.mqtt_client.publish("sentrybot/cmd", json.dumps(payload))
+            return
+        if self.overridden:
+            return
        # lookup color if string
        if type(color) is float:
            color = int(color)
@@ -170,6 +540,16 @@ def set(self, identifiers, color, gradient=False):
            color = (round(color[0]*100), round(color[1]*100), round(color[2]*100)) # increase values to be used as LED RGB
        elif type(color) is str:
            color = NeoPx.COLOR_MAP[color]
+        # If protocol is ESP32, send commands via serial and exit early
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            for i in identifiers:
+                if type(i) is str:
+                    i = self.positions[i]
+                cmd = "SET {} {} {} {}\n".format(i, color[0], color[1], color[2])
+                self.serial.write(cmd.encode())
+                # minimal delay for serial transmission
+                sleep(0.01)
+            return
        for i in identifiers:
            if type(i) is str:
                i = self.positions[i]
@@ -189,8 +569,15 @@ def set(self, identifiers, color, gradient=False):
        sleep(.1)

    def apply_brightness_modifier(self, identifier, color):
-        # Some neopixels do not need to be full brightness. Reduce intensity with the BRIGHTNESS_MODIFIER for each neopixel
-        return (round(color[0]*self.brightness[identifier]), round(color[1]*self.brightness[identifier]), round(color[2]*self.brightness[identifier]))
+        # If the brightness list is not defined, or the index is out of range, return the color unchanged
+        if not self.brightness or identifier >= len(self.brightness):
+            return color
+
+        return (
+            round(color[0]*self.brightness[identifier]),
+            round(color[1]*self.brightness[identifier]),
+            round(color[2]*self.brightness[identifier])
+        )

    def ring(self, color):
        self.set(self.ring_eye, color)
@@ -198,12 +585,10 @@ def ring(self, color):
    def flashlight(self, on):
        if on:
            self.set(self.all_eye, NeoPx.COLOR_WHITE_FULL)
-            set_led_pin(True)
            self.overridden = True
        else:
            self.overridden = False
            self.set(self.all_eye, NeoPx.COLOR_OFF)
-            set_led_pin(False)

    def off(self):
        if self.thread:
@@ -221,19 +606,40 @@ def full(self, color):
    def eye(self, color):
        if 'middle' not in self.positions:
            raise ValueError('Middle position not found')
-        if color not in NeoPx.COLOR_MAP.keys():
-            raise ValueError('Color not found')
+
+        # Validate the color using the helper function
+        normalized_color = self.color_to_arduino_format(color)
+        if not normalized_color:
+            raise ValueError('Invalid color format or color not found')
+
        index = self.positions['middle']
-        if (self.count < index):
+        if self.count <= index:
            index = self.count - 1
            pub.sendMessage('log', msg='[LED] Error in set pixels: index out of range, changing to last pixel')
-        if self.pixels[index] != color:
+
+        # Handle ESP32 protocol differently
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            pub.sendMessage('log', msg='[LED] Setting eye colour: ' + color)
+            # For ESP32, the set method already handles serial communication
+            self.set(index, color)
+            return
+
+        # For other protocols, only proceed if pixels exist
+        if hasattr(self, 'pixels'):
            pub.sendMessage('log', msg='[LED] Setting eye colour: ' + color)
            self.set(index, NeoPx.COLOR_MAP[color])

    def party(self):
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            # For ESP32, use the RAINBOW_CYCLE animation
+            self.send_animation_command('RAINBOW_CYCLE')
+            return
+
+        # Original party implementation for other protocols
        # self.animate(self.all, 'off', 'rainbow_cycle')
-
        for j in range(256 * 1):
            for i in range(self.count):
                self.set(i, NeoPx._wheel((int(i * 256 / self.count) + j) & 255))
@@ -255,7 +661,12 @@ def animate(self, identifiers, color, animation):
            pub.sendMessage('log', msg='[LED] Animation already started. Command ignored')
            return

-        pub.sendMessage('log', msg='[LED] Animation starting: ' + animation)
+        pub.sendMessage('log', msg=f'[LED] Running animation: {animation}')
+
+        # For ESP32, use the send_animation_command method
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            self.send_animation_command(animation, color)
+            return

        animations = {
            'spinner': self.spinner,
@@ -267,8 +678,266 @@ def animate(self, identifiers, color, animation):
        self.animation = True
        if animation in animations:
            self.thread = threading.Thread(target=animations[animation], args=(identifiers, color,))
-        self.thread.start()
+            self.thread.start()

+    def handle_animate(self, animation, color=None, color2=None, repeat=1):
+        """
+        Handle animation requests from pubsub
+        :param animation: animation name
+        :param color: optional first color
+        :param color2: optional second color
+        :param repeat: optional repeat count (1-10)
+        """
+        # Log all animation requests for debugging
+        pub.sendMessage('log:info', msg=f"[NeoPx] handle_animate called: animation={animation}, color={color}, color2={color2}, repeat={repeat}")
+
+        if not self.use_mqtt and self.protocol == 'ESP32':
+            # Add more detailed debug info
+            pub.sendMessage('log:info', msg=f"[NeoPx] Sending ESP32 animation command: {animation}")
+            self.send_animation_command(animation, color, color2, repeat)
+        else:
+            # Fall back to standard animation system
+            color_val = color if color else NeoPx.COLOR_RED
+            self.animate(self.all, color_val, animation.lower())
+
+    def send_animation_command(self, animation, color=None, color2=None, repeat=1):
+        """ Send an animation command to the ESP32, or publish it via MQTT when enabled """
+        if self.use_mqtt:
+            payload = {
+                "cmd": "ANIMATE",
+                "animation": animation,
+                "color": color,
+                "color2": color2,
+                "repeat": repeat
+            }
+            self.mqtt_client.publish("sentrybot/cmd", json.dumps(payload))
+            return
+        if self.protocol != 'ESP32' or not hasattr(self, 'serial') or not self.serial:
+            pub.sendMessage('log:warning', msg="[NeoPx] Cannot send animation command: Not connected to ESP32.")
+            return
+
+        # Ensure animation name is uppercase for ESP32
+        animation_upper = str(animation).upper() if animation else "RAINBOW_CYCLE"  # Default if None
+
+        # Ensure repeat is within bounds
+        try:
+            repeat_val = max(1, min(10, int(repeat)))
+        except (ValueError, TypeError):
+            repeat_val = 1
+
+        # Convert colors to Arduino format
+        color_str = self.color_to_arduino_format(color) if color else ""
+        color2_str = self.color_to_arduino_format(color2) if color2 else ""
+
+        # Construct the command string: ANIMATE NAME [COLOR1] [COLOR2] REPEAT
+        # Note: Order matters. ESP32 code must parse this correctly.
+        cmd_parts = ["ANIMATE", animation_upper]
+        if color_str:
+            cmd_parts.append(color_str)
+        if color2_str:
+            # Only add color2 if color1 was also present (or adjust ESP32 logic)
+            if color_str:
+                cmd_parts.append(color2_str)
+            else:
+                pub.sendMessage('log:warning', msg=f"[NeoPx] Color2 specified ('{color2_str}') but Color1 is missing for animation '{animation_upper}'. Sending without Color2.")
+        cmd_parts.append(str(repeat_val))
+
+        cmd = " ".join(cmd_parts) + "\n"
+
+        pub.sendMessage('log:info', msg=f"[NeoPx] Sending animation command to ESP32: {cmd.strip()}")
+        try:
+            if self.serial.is_open:
+                self.serial.write(cmd.encode('utf-8'))
+                sleep(0.05)  # Small delay for serial buffer
+            else:
+                pub.sendMessage('log:error', msg="[NeoPx] Serial port is not open!")
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[NeoPx] Error writing to serial port: {e}")
+
+    # Consolidated audio event handler
+    def handle_audio_event_pubsub(self, topic=pub.AUTO_TOPIC):
+        """Processes audio events like TTS start/end"""
+        event_type = topic.getName()  # e.g., 'tts:speaking', 'tts:complete'
+
+        if not self.use_mqtt and self.protocol != 'ESP32':
+            return
+
+        pub.sendMessage('log:info', msg=f"[NEOPX] Audio Event received: {event_type}")
+
+        if event_type == 'tts:speaking':
+            # Optional: Subtle animation during speech. Avoid WAVE.
+            # Maybe a slow cyan pulse?
+            # self.send_animation_command('PULSE', color='CYAN', repeat=1)
+            pub.sendMessage('log:debug', msg="[NEOPX] TTS Speaking event - No specific animation triggered.")
+            pass  # Let HEAD_NOD from GUI be the primary indicator
+        elif event_type == 'tts:complete':
+            # Revert to the animation appropriate for the current processing mode
+            pub.sendMessage('log:info', msg="[NEOPX] TTS complete, reverting to mode animation.")
+            # Force update ensures the mode animation is re-sent
+            self.handle_processing_mode_change({'mode': self.current_processing_mode, 'force_update': True})
+
+    # Handler for motion events received via PubSub
+    def handle_motion_event_pubsub(self, data):
+        detected = data.get('detected', False)
+        pub.sendMessage('log:info', msg=f"[NEOPX] Motion event received via PubSub: detected={detected}")
+        self.handle_motion_event(detected, source="gui_event")  # Reuse existing logic
+
+    # Existing logic, now potentially called by pubsub handler too
+    def handle_motion_event(self, detected, source="unknown"):
+        """Update LED animations based on motion state change"""
+        if not self.use_mqtt and self.protocol != 'ESP32':
+            return
+
+        # Only react if the mode is relevant (e.g., 'motion' or 'face_find'?)
+        # Or maybe always show motion? Depends on desired behavior.
+        # if self.current_processing_mode != 'motion':
+        #     pub.sendMessage('log:debug', msg=f"[NEOPX] Ignoring motion event in mode: {self.current_processing_mode}")
+        #     return
+
+        pub.sendMessage('log:info', msg=f"[NEOPX] Motion state change: detected={detected}, source={source}")
+
+        if detected:
+            # Motion detected: Red Comet (or choose another 'alert' animation)
+            self.send_animation_command('METEOR', color='RED', repeat=1)
+        else:
+            # Motion stopped: Revert to the animation for the current processing mode
+            pub.sendMessage('log:info', msg="[NEOPX] Motion stopped, reverting to mode animation.")
+            self.handle_processing_mode_change({'mode': self.current_processing_mode})
+
+    # Renamed/modified from handle_camera_mode
+    def handle_processing_mode_change(self, data):
+        """Update LED colors based on processing mode change"""
+        mode = data.get('mode', 'none')
+        if not self.use_mqtt and self.protocol != 'ESP32':
+            return
+
+        # Only update if mode actually changes or forced
+        force_update = data.get('force_update', False)
+        if mode == self.current_processing_mode and not force_update:
+            return
+
+        self.current_processing_mode = mode  # Store the new mode
+        pub.sendMessage('log:info', msg=f"[NEOPX] Processing mode changed to: {mode}")
+
+        if mode == 'motion':
+            # Green pulse for motion detection mode
+            self.send_animation_command('PULSE', color='GREEN', repeat=1)
+        elif mode == 'face_recognition':
+            # Blue pulse for face recognition mode
+            self.send_animation_command('PULSE', color='BLUE', repeat=1)
+        elif mode == 'finger':
+            self.send_animation_command('TWINKLE', color='GOLD', repeat=1)
+        elif mode == 'age_emotion':
+            self.send_animation_command('RUNNING_LIGHTS', color='TEAL', repeat=1)
+        elif mode == 'face_find':
+            # Purple pulse for face finding mode
+            self.send_animation_command('PULSE', color='PURPLE', repeat=1)
+        elif mode == 'none':
+            # Dim white breathe for idle/no processing mode
+            self.send_animation_command('BREATHE', color='WHITE_DIM', repeat=1)  # Use a dim white
+        else:
+            pub.sendMessage('log:warning', msg=f"[NEOPX] Unknown processing mode: {mode}")
+            # Default to idle animation
+            self.send_animation_command('BREATHE', color='WHITE_DIM', repeat=1)
+
+    def handle_priority_person(self, person_name, animation_name="RAINBOW_CYCLE"):
+        """Show an animation when a priority person is detected"""
+
+        # Ensure animation name is valid/normalized - important fix!
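+        # The resulting serial line looks like "ANIMATE RAINBOW_CYCLE GREEN 3"
+        # (see the explicit command string built below).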
+        if not animation_name or animation_name.lower() == "default":
+            animation_name = "RAINBOW_CYCLE"
+
+        pub.sendMessage('log:info', msg=f"[NeoPx] Priority person detected: {person_name}, animation: {animation_name}")
+
+        # Send the command directly via SerialShareManager (more reliable).
+        # Imported here because the import in __init__ is function-local.
+        from modules.SerialShareManager import send_command
+        animation_cmd = f"ANIMATE {animation_name.upper()} GREEN 3"
+        send_command(animation_cmd, "neopx_priority")
+
+        # Also send it through the normal animation channel
+        self.send_animation_command(animation_name, color="GREEN", repeat=3)
+
+        # Return success status
+        return True
+
+    # Animation methods for ESP32 protocol
+    def rainbow_esp32(self, repeat=1):
+        """Trigger rainbow animation on ESP32"""
+        self.send_animation_command('RAINBOW', repeat=repeat)
+
+    def rainbow_cycle_esp32(self, repeat=1):
+        """Trigger rainbow cycle animation on ESP32"""
+        self.send_animation_command('RAINBOW_CYCLE', repeat=repeat)
+
+    def spinner_esp32(self, color='RED', repeat=1):
+        """Trigger spinner animation on ESP32"""
+        self.send_animation_command('SPINNER', color, repeat=repeat)
+
+    def breathe_esp32(self, color='RED', repeat=1):
+        """Trigger breathing animation on ESP32"""
+        self.send_animation_command('BREATHE', color, repeat=repeat)
+
+    def meteor_esp32(self, color='WHITE', repeat=1):
+        """Trigger meteor rain animation on ESP32"""
+        self.send_animation_command('METEOR', color, repeat=repeat)
+
+    def fire_esp32(self, repeat=1):
+        """Trigger fire animation on ESP32"""
+        self.send_animation_command('FIRE', repeat=repeat)
+
+    def comet_esp32(self, color='CYAN', repeat=1):
+        """Trigger comet animation on ESP32"""
+        self.send_animation_command('COMET', color, repeat=repeat)
+
+    def wave_esp32(self, repeat=1):
+        """Trigger wave animation on ESP32"""
+        self.send_animation_command('WAVE', repeat=repeat)
+
+    def pulse_esp32(self, color='MAGENTA', repeat=1):
+        """Trigger pulse animation on ESP32"""
+        self.send_animation_command('PULSE', color, repeat=repeat)
+
+    def twinkle_esp32(self, color='WHITE', repeat=1):
+        """Trigger twinkle animation on ESP32"""
+        self.send_animation_command('TWINKLE', color, repeat=repeat)
+
+    def color_wipe_esp32(self, color='RED', repeat=1):
+        """Trigger color wipe animation on ESP32"""
+        self.send_animation_command('COLOR_WIPE', color, repeat=repeat)
+
+    def random_blink_esp32(self, repeat=1):
+        """Trigger random blink animation on ESP32"""
+        self.send_animation_command('RANDOM_BLINK', repeat=repeat)
+
+    def theater_chase_esp32(self, color='WHITE', repeat=1):
+        """Trigger theater chase animation on ESP32"""
+        self.send_animation_command('THEATER_CHASE', color, repeat=repeat)
+
+    def snow_esp32(self, color='WHITE', repeat=1):
+        """Trigger snow animation on ESP32"""
+        self.send_animation_command('SNOW', color, repeat=repeat)
+
+    def alternating_esp32(self, color='RED', color2='BLUE', repeat=1):
+        """Trigger alternating colors animation on ESP32"""
+        self.send_animation_command('ALTERNATING', color, color2, repeat)
+
+    def gradient_esp32(self, repeat=1):
+        """Trigger gradient animation on ESP32"""
+        self.send_animation_command('GRADIENT', repeat=repeat)
+
+    def bouncing_ball_esp32(self, color='RED', repeat=1):
+        """Trigger bouncing ball animation on ESP32"""
+        self.send_animation_command('BOUNCING_BALL', color, repeat=repeat)
+
+    def running_lights_esp32(self, color='RED', repeat=1):
+        """Trigger running lights animation on ESP32"""
+        self.send_animation_command('RUNNING_LIGHTS', color, repeat=repeat)
+
+    def stacked_bars_esp32(self, repeat=1):
+        """Trigger stacked bars animation on ESP32"""
+        self.send_animation_command('STACKED_BARS', repeat=repeat)
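+
+    # Example (assumed) direct usage of the wrappers above:
+    #   px = NeoPx(count=12, protocol='ESP32', serial_port='/dev/ttyUSB0')
+    #   px.breathe_esp32(color='BLUE', repeat=2)
+    #   px.alternating_esp32(color='RED', color2='BLUE', repeat=5)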

    def spinner(self, identifiers, color, index=1):
        """
@@ -343,14 +1012,21 @@ def rainbow_cycle(self, identifiers, color, wait_ms=20, iterations=5):
        for j in range(256 * iterations):
            for i in range(self.count):
                self.set(i, NeoPx._wheel((int(i * 256 / self.count) + j) & 255))
-                t = threading.currentThread()
+                t = threading.current_thread()  # currentThread() is deprecated; the check below needs t
                if not getattr(t, "animation", True):
                    return
            sleep(wait_ms / 1000)

+    # Direct-call helper for priority person animations
+    def priority_person_animation(self, person_name, animation_name='RAINBOW'):
+        """Run a priority person animation directly"""
+        pub.sendMessage('log:info', msg=f"[NeoPx] Running priority animation for {person_name}: {animation_name}")
+        self.handle_animate(animation_name, color='GREEN', repeat=2)
+        return True
+

 if __name__ == '__main__':
-    inst = NeoPx(12)
+    inst = NeoPx(count=12)
    inst.set(inst.all, 1)
    sleep(2)
    inst.set(inst.all, 50)
@@ -367,4 +1043,18 @@ def rainbow_cycle(self, identifiers, color, wait_ms=20, iterations=5):
    sleep(2)
    inst.flashlight(True)
    sleep(2)
-    inst.flashlight(False)
\ No newline at end of file
+    inst.flashlight(False)
+
+# Other modules can call this to access the ESP32 shared serial connection if available.
+def get_shared_esp32_serial():
+    """Get the shared ESP32 serial connection for use by other modules"""
+    global _esp32_shared_serial, _esp32_serial_initialized, _esp32_serial_lock
+
+    with _esp32_serial_lock:
+        # Check if the serial connection is properly initialized
+        if _esp32_shared_serial is not None and _esp32_serial_initialized:
+            return _esp32_shared_serial
+
+    # If the lock is released and we get here, serial is not available
+    return None
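+
+# Example (assumed) usage from another module, mirroring modules/rfid.py and
+# modules/sensor.py later in this PR:
+#   from modules.neopixel.neopx import get_shared_esp32_serial
+#   ser = get_shared_esp32_serial()
+#   if ser:
+#       ser.write(b"MONITOR_RFID 1\n")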
diff --git a/modules/network/arduinoserial.py b/modules/network/arduinoserial.py
index 642b319c..de687697 100644
--- a/modules/network/arduinoserial.py
+++ b/modules/network/arduinoserial.py
@@ -14,8 +14,7 @@ class ArduinoSerial:
    DEVICE_PIN = 2
    DEVICE_PIN_READ = 3
    DEVICE_SERVO_RELATIVE = 4
-    DEVICE_MOTOR = 5  # Added MOTOR device type
-    ORDER_RECEIVED = 6
+    ORDER_RECEIVED = 5

    def __init__(self, **kwargs):
        self.port = kwargs.get('port', '/dev/ttyAMA0')
        self.baudrate = kwargs.get('baudrate', 115200)
@@ -27,29 +26,25 @@ def __init__(self, **kwargs):
    def initialise(port, baudrate):
        try:
            print('Trying to select port ' + port)
-            serial_file = open_serial_port(serial_port=port, baudrate=baudrate, timeout=None)
+            serial_file = open_serial_port(serial_port=port, baudrate=baudrate, timeout=2)
        except Exception as e:
            raise e
-        is_connected = True # assume connection
-        # bytes_array = False
-        # attempts = 1
-        # # Initialize communication with Arduino
-        # while not is_connected and attempts > 0:
-        #     attempts = attempts -1
-        #     pub.sendMessage('log', msg="[ArduinoSerial] Waiting for arduino...")
-        #     write_order(serial_file, Order.HELLO)
-        #     bytes_array = bytearray(serial_file.read(1))
-        #     if not bytes_array:
-        #         time.sleep(2)
-        #         continue
-        #     byte = bytes_array[0]
-        #     if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]:
-        #         is_connected = True
-        #     if is_connected:
-        #         pub.sendMessage('log', msg="[ArduinoSerial] Connected to Arduino")
-        #     else:
-        #         pub.sendMessage('log', msg="[ArduinoSerial] NOT CONNECTED")
-        #         serial_file = None
+        # Connection test: send HELLO and wait for a reply
+        import time
+        from pubsub import pub
+        try:
+            write_order(serial_file, Order.HELLO)
+            bytes_array = bytearray(serial_file.read(1))
+            if bytes_array:
+                byte = bytes_array[0]
+                if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]:
+                    pub.sendMessage('log', msg=f"[ArduinoSerial] Arduino connection successful ({'HELLO' if byte==Order.HELLO.value else 'ALREADY_CONNECTED'})")
+                else:
+                    pub.sendMessage('log', msg=f"[ArduinoSerial] Unexpected response from Arduino: {byte}")
+            else:
+                pub.sendMessage('log', msg="[ArduinoSerial] No response received from Arduino!")
+        except Exception as e:
+            pub.sendMessage('log', msg=f"[ArduinoSerial] Error during connection test: {e}")
        return serial_file

    def read(self):
@@ -122,14 +117,4 @@ def send(self, type, identifier, message):
            write_i8(self.serial_file, identifier)
            pub.sendMessage('led', identifiers='status5', color='off')
            return read_i16(self.serial_file)
-        elif type == ArduinoSerial.DEVICE_MOTOR or type == 'motor': # Added MOTOR handling
-            write_order(self.serial_file, Order.MOTOR)
-            write_i8(self.serial_file, identifier)
-            write_i8(self.serial_file, message)
-            pub.sendMessage('log', msg="[ArduinoSerial] Motor command sent: ID={}, Speed={}".format(identifier, message))
        pub.sendMessage('led', identifiers='status5', color='off')
-
-    def set_led_pin(self, on):
-        write_order(self.serial_file, Order.PIN)
-        write_i8(self.serial_file, 13)
-        write_i8(self.serial_file, 1 if on else 0)
diff --git a/modules/network/robust_serial/robust_serial.py b/modules/network/robust_serial/robust_serial.py
index cf5f1d33..83b54dc0 100644
--- a/modules/network/robust_serial/robust_serial.py
+++ b/modules/network/robust_serial/robust_serial.py
@@ -20,7 +20,6 @@ class Order(Enum):
    LED = 7
    PIN = 8
    READ = 9
-    MOTOR = 10  # Added MOTOR order

 def read_order(f):
    """
@@ -105,7 +104,7 @@ def decode_order(f, byte, debug=False):
        msg = "SERVO {}".format(angle)
-    elif order == Order.MOTOR:
-        speed = read_i8(f)
-        msg = "MOTOR {}".format(speed)
    elif order == Order.ALREADY_CONNECTED:
        msg = "ALREADY_CONNECTED"
    elif order == Order.ERROR:
diff --git a/modules/network/robust_serial/threads.py b/modules/network/robust_serial/threads.py
index e1d087c8..80137665 100644
--- a/modules/network/robust_serial/threads.py
+++ b/modules/network/robust_serial/threads.py
@@ -48,8 +48,7 @@ def run(self):
            write_order(self.serial_file, order)
            # print("Sent {}".format(order))
-            if order == Order.MOTOR:
-                write_i8(self.serial_file, param['motor_id'])  # Added MOTOR handling
-                write_i8(self.serial_file, param['speed'])
-            elif order == Order.SERVO:
+            if order == Order.SERVO:
                write_i16(self.serial_file, param)
            time.sleep(rate)
diff --git a/modules/network/robust_serial/utils.py b/modules/network/robust_serial/utils.py
index 3ea06958..60a5b8f5 100644
--- a/modules/network/robust_serial/utils.py
+++ b/modules/network/robust_serial/utils.py
@@ -38,14 +38,16 @@ def get_serial_ports():
    """
    Lists serial ports.
-    :return: ([str]) A list of available serial ports
    """
    if sys.platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
-        # this excludes your current terminal "/dev/tty"
-        ports = glob.glob('/dev/tty[A-Za-z]*')
+        # Collect the /dev/ttyUSB* ports first
+        usb_ports = glob.glob('/dev/ttyUSB*')
+        other_ports = glob.glob('/dev/tty[A-Za-z]*')
+        # Put the /dev/ttyUSB* ports at the front of the list
+        ports = usb_ports + [p for p in other_ports if p not in usb_ports]
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
@@ -75,10 +77,11 @@ def open_serial_port(serial_port=None, baudrate=115200, timeout=0, write_timeout
    :return: (Serial Object)
    """
    ports = get_serial_ports()
-    print('Serial port sent: ' + serial_port)
-    # Open serial port (for communication with Arduino)
+    print('Serial port sent: ' + str(serial_port))
+    # If no serial_port is given, prefer the /dev/ttyUSB* ports
    if serial_port is None:
-        serial_port = ports[0]
-        # timeout=0 non-blocking mode, return immediately in any case, returning zero or more,
-        # up to the requested number of bytes
+        if ports:
+            serial_port = ports[0]
+        else:
+            raise serial.SerialException('No serial ports found')
    return serial.Serial(port=serial_port, baudrate=baudrate, timeout=timeout, writeTimeout=write_timeout)
diff --git a/modules/ollama/audio_llm.py b/modules/ollama/audio_llm.py
index b6a7d0a5..83a7cacd 100644
--- a/modules/ollama/audio_llm.py
+++ b/modules/ollama/audio_llm.py
@@ -2,6 +2,7 @@
 import requests
 from pubsub import pub
+import ollama

 class AudioLLM:
    def __init__(self, **kwargs):
@@ -33,7 +34,8 @@ def handle_voice_input(self, text):
        # Optionally translate the input text.
        if self.translator is not None:
            try:
-                text = self.translator.translate(text, self.source_lang, self.target_lang)
+                # Use translator.request(...) instead of translator.translate(...)
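+                # NOTE: the translator.request(text, src=..., dest=...) signature used
+                # below is an assumption about the local translator module's API.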
+                text = self.translator.request(text, src=self.source_lang, dest=self.target_lang)
                print(f"[AudioLLM] Translated text: {text}")
            except Exception as e:
                print(f"[AudioLLM] Translation error: {e}")
@@ -46,20 +48,21 @@ def handle_voice_input(self, text):
            print("[AudioLLM] No response from LLM API.")

    def send_to_llm(self, text):
-        """Sends the text to the LLM API and returns the response."""
-        payload = {'input': text}
+        """Sends the text to the Ollama LLM using the ollama Python library and returns the response."""
        try:
-            response = requests.post(self.ollama_url, json=payload, timeout=10)
-            if response.status_code == 200:
-                data = response.json()
-                output = data.get('output', '')
-                print(f"[AudioLLM] LLM API response: {output}")
-                return output
-            else:
-                print(f"[AudioLLM] LLM API status: {response.status_code}")
-                return None
+            # The model name can come from config; fall back to a default otherwise
+            model = 'llama2'
+            if hasattr(self, 'model'):
+                model = self.model
+            response = ollama.generate(
+                model=model,
+                prompt=text
+            )
+            output = response.get('response', '')
+            print(f"[AudioLLM] Ollama response: {output}")
+            return output
        except Exception as e:
-            print(f"[AudioLLM] Error contacting LLM API: {e}")
+            print(f"[AudioLLM] Error contacting Ollama: {e}")
            return None

    def handle_llm_response(self, response_text):
diff --git a/modules/personality.py b/modules/personality.py
index b1374c13..bf851164 100644
--- a/modules/personality.py
+++ b/modules/personality.py
@@ -1,125 +1,427 @@
+# personality.py (keeps integer states internally while the GUI uses string state names)
+
 from random import randint
-from time import sleep, localtime
+# from time import sleep, localtime  # old import
+import time  # new: needed for timestamps and intervals
 from pubsub import pub
 from datetime import datetime, timedelta
+from types import SimpleNamespace
+import threading  # for periodic status updates
+import traceback  # for debugging
+import inspect  # for signature checks (optional debugging)

-from modules.config import Config
-from modules.behaviours.dream import Dream
-from modules.behaviours.faces import Faces
-from modules.behaviours.motion import Motion
-from modules.behaviours.boredom import Boredom
-from modules.behaviours.feel import Feel
-from modules.behaviours.sleep import Sleep
-from modules.behaviours.respond import Respond
-from modules.behaviours.objects import Objects
-from modules.behaviours.sentiment import Sentiment
+# Define the Config constants with default values (or import them from the real Config module)
+class DefaultConfig:
+    MODE_LIVE = 'live'
+    # These definitions go unused when config.py loads, but keep them as a fallback
+    STATE_SLEEPING = 0
+    STATE_RESTING = 1
+    STATE_IDLE = 2
+    STATE_ALERT = 3
+    # Other constants...

-from types import SimpleNamespace
+try:
+    # Try to import the real configuration module
+    from modules.config import Config
+    print("[Personality] Loaded state constants from modules.config")
+except ImportError:
+    print("[Personality] WARN: 'modules.config.Config' not found. Using default integer values.")
+    Config = DefaultConfig  # fall back to the defaults on failure

+# Try to import the behaviour modules (or use dummy classes)
+try:
+    from modules.behaviours.dream import Dream
+    from modules.behaviours.faces import Faces
+    from modules.behaviours.motion import Motion
+    from modules.behaviours.boredom import Boredom
+    from modules.behaviours.feel import Feel
+    from modules.behaviours.sleep import Sleep
+    from modules.behaviours.respond import Respond
+    from modules.behaviours.objects import Objects
+    from modules.behaviours.sentiment import Sentiment
+    BEHAVIOURS_AVAILABLE = True
+except ImportError as e:
+    print(f"[Personality] WARN: Could not import one or more behaviour modules: {e}. Using dummy behaviours.")
+    BEHAVIOURS_AVAILABLE = False
+    # On failure, create empty (dummy) behaviour classes
+    class DummyBehaviour:
+        def __init__(self, *args, **kwargs): pass
+        def __getattr__(self, name): return lambda *args, **kwargs: None  # ignore any method call
+    Dream, Faces, Motion, Boredom, Feel, Sleep, Respond, Objects, Sentiment = (DummyBehaviour,) * 9

-"""
-This class dictates the behaviour of the robot, subscribing to various input events (face matches or motion)
-and triggering animations as a result of those behaviours (or lack of)
-
-It also stores the current 'state of mind' of the robot, so that we can simulate boredom and other emotions based
-on the above stimulus.
-
-To update the personality status from another module, publish to the 'behaviour' topic one of the defined INPUT_TYPE constants:
-from pubsub import pub
-pub.sendMessage('behaviour', type=Personality.INPUT_TYPE_FUN)
-"""
 class Personality:
+    # --- Constants (taken from Config as integers) ---
+    STATE_SLEEPING = Config.STATE_SLEEPING  # 0
+    STATE_RESTING = Config.STATE_RESTING    # 1
+    STATE_IDLE = Config.STATE_IDLE          # 2
+    STATE_ALERT = Config.STATE_ALERT        # 3
+    AVAILABLE_STATES = [STATE_SLEEPING, STATE_RESTING, STATE_IDLE, STATE_ALERT]  # [0, 1, 2, 3]
+
+    # --- Mapping from string names to integers ---
+    STATE_NAME_TO_INT = {
+        'SLEEPING': STATE_SLEEPING,
+        'RESTING': STATE_RESTING,
+        'IDLE': STATE_IDLE,
+        'ALERT': STATE_ALERT,
+    }
+    # --- Mapping from integers back to string names ---
+    STATE_INT_TO_NAME = {v: k for k, v in STATE_NAME_TO_INT.items()}

+    # Example personality (in a real application this could come from a file or config)
+    DEFAULT_PERSONALITY = "Default"  # single fixed personality

    def __init__(self, **kwargs):
        self.mode = kwargs.get('mode', Config.MODE_LIVE)
-        self.state = Config.STATE_SLEEPING
-        self.eye = 'blue'
+        # _state starts as an integer (Config.STATE_SLEEPING, i.e. 0)
+        self._state = Config.STATE_SLEEPING
+        self._eye_color = 'blue'  # initial eye color
+        self._current_personality = "Default"  # initial personality
+        self._last_status_update_time = 0
+        self._status_update_interval = kwargs.get('status_interval', 2)  # from config, or the default
+        self._lock = threading.Lock()  # thread-safe access to the state variables
+
+        pub.sendMessage('log:info', msg="[Personality] Initializing...")

-        pub.subscribe(self.loop, 'loop:1')
+        # --- PubSub subscriptions ---
+        pub.subscribe(self._loop_update, 'loop:1')
        pub.subscribe(self.process_sentiment, 'sentiment')
-
-        behaviours = { 'boredom': Boredom(self),
-                       'dream': Dream(self),
-                       'faces': Faces(self),
-                       'motion': Motion(self),
-                       'sleep': Sleep(self),
-                       'feel': Feel(self),
-                       'objects': Objects(self),
-                       'respond': Respond(self),
-                       'sentiment': Sentiment(self)}
-        self.behaviours = SimpleNamespace(**behaviours)
-
-    def loop(self):
-        # pub.sendMessage('speech', text="Hello, I am happy") # for testing sentiment responses
-        if not self.is_asleep() and not self.behaviours.faces.face_detected and not self.behaviours.motion.is_motion() and not self.behaviours.objects.is_detected:
-            self.set_eye('red')
-
-        if self.state == Config.STATE_ALERT and self.lt(self.behaviours.faces.last_face, self.past(2*60)) and self.lt(self.behaviours.objects.last_detection, self.past(2*60)):
-            # reset to idle position after 2 minutes inactivity
-            pub.sendMessage('animate', action="wake")
-            self.set_state(Config.STATE_IDLE)
-
-    def process_sentiment(self, score):
-        pub.sendMessage('log', msg="[Personality] Sentiment: " + str(score))
-        if score > 0:
-            pub.sendMessage('piservo:move', angle=0)
-        else:
-            pub.sendMessage('piservo:move', angle=40)
-
-    def set_eye(self, color):
-        if self.eye == color:
-            return
-        # pub.sendMessage('led', identifiers=['left', 'right'], color='off')
-        pub.sendMessage('led:eye', color=color)
-        self.eye = color
+        pub.subscribe(self.handle_set_state_cmd, 'cmd:personality:set_state')
+        pub.subscribe(self.handle_set_eye_cmd, 'cmd:personality:set_eye')
+        pub.subscribe(self.handle_get_status_cmd, 'cmd:personality:get_status')
+        pub.subscribe(self.on_face_detected, 'vision:face:detected')
+        pub.subscribe(self.on_motion_detected, 'vision:motion:detected')
+        pub.subscribe(self.on_object_detected, 'vision:object:detected')
+
+        # --- Initialise behaviours ---
+        if BEHAVIOURS_AVAILABLE:
+            try:
+                # Load the behaviour modules
+                self.behaviours = SimpleNamespace()
+
+                # Try to load each behaviour module separately and log failures
+                try:
+                    self.behaviours.dream = Dream(self)
+                    pub.sendMessage('log:info', msg="[Personality] Loaded Dream module")
+                except Exception as e:
+                    pub.sendMessage('log:error', msg=f"[Personality] Error loading Dream module: {e}")
+
+                try:
+                    self.behaviours.faces = Faces(self)
+                    pub.sendMessage('log:info', msg="[Personality] Loaded Faces module")
+                except Exception as e:
+                    pub.sendMessage('log:error', msg=f"[Personality] Error loading Faces module: {e}")
+
+                # Other modules...
+
+                pub.sendMessage('log:info', msg="[Personality] Behaviours initialized.")
+            except Exception as e:
+                pub.sendMessage('log:error', msg=f"[Personality] Failed to initialize behaviours: {e}")
+                # Also create an empty behaviours namespace on failure
+                self.behaviours = SimpleNamespace()
+        else:
+            pub.sendMessage('log:warning', msg="[Personality] Running without behaviour modules.")
+            # Create an empty behaviours namespace when running without behaviour modules
+            self.behaviours = SimpleNamespace()
+
+        # Set the initial state (as an integer) and publish the first status
+        self.set_state(self._state, force_publish=True)
+        # Use the readable name when logging
+        initial_state_name = self.STATE_INT_TO_NAME.get(self._state, str(self._state))
+        pub.sendMessage('log:info', msg=f"[Personality] Initialized. State: {initial_state_name} ({self._state}), Eye: {self._eye_color}")
+
+    # --- Main loop and state management ---
+
+    def _loop_update(self):
+        """Called periodically (e.g., every second) by the main loop."""
+        current_time = time.time()
+
+        # Check that the behaviours attribute exists (extra safeguard)
+        if not hasattr(self, 'behaviours'):
+            self.behaviours = SimpleNamespace()  # create it if missing
+            pub.sendMessage('log:warning', msg="[Personality] Created missing behaviours attribute")
+
+        if hasattr(self.behaviours, 'boredom') and callable(getattr(self.behaviours.boredom, 'loop', None)):
+            self.behaviours.boredom.loop()
+
+        if not self.is_asleep():
+            face_detected = getattr(self.behaviours.faces, 'face_detected', False) if hasattr(self.behaviours, 'faces') else False
+            motion_detected = getattr(self.behaviours.motion, 'is_motion', lambda: False)() if hasattr(self.behaviours, 'motion') else False
+            object_detected = getattr(self.behaviours.objects, 'is_detected', False) if hasattr(self.behaviours, 'objects') else False
+
+            if not face_detected and not motion_detected and not object_detected:
+                pass
+
+            last_face_time = getattr(self.behaviours.faces, 'last_face', self.past(9999)) if hasattr(self.behaviours, 'faces') else self.past(9999)
+            last_motion_time = getattr(self.behaviours.motion, 'last_motion', self.past(9999)) if hasattr(self.behaviours, 'motion') else self.past(9999)
+            last_object_time = getattr(self.behaviours.objects, 'last_detection', self.past(9999)) if hasattr(self.behaviours, 'objects') else self.past(9999)
+
+            # Compare against the integer state constants
+            if (self._state == self.STATE_ALERT or self._state == self.STATE_IDLE) and \
+               self.lt(last_face_time, self.past(5 * 60)) and \
+               self.lt(last_motion_time, self.past(5 * 60)) and \
+               self.lt(last_object_time, self.past(5 * 60)):
+                # Only change if the state is not already RESTING
+                if self._state != self.STATE_RESTING:
+                    pub.sendMessage('log:info', msg="[Personality] Long inactivity detected, switching to RESTING state.")
+                    self.set_state(self.STATE_RESTING)  # pass the integer state
+
+        # Publish the status periodically (for GUI updates)
+        if current_time - self._last_status_update_time >= self._status_update_interval:
+            self.publish_status()
+            self._last_status_update_time = current_time
+
+    # --- set_state (expects an integer and resets the eye color conservatively) ---
+    def set_state(self, new_state_int, force_publish=False):
+        """Sets the robot's state (expects an integer) and triggers the associated actions.
+        Avoids overriding a manually set eye color unless waking up or initializing."""
+        if new_state_int not in self.AVAILABLE_STATES:
+            pub.sendMessage('log:warning', msg=f"[Personality] Invalid state integer requested: {new_state_int}")
+            return
+
+        with self._lock:
+            if self._state == new_state_int and not force_publish:
+                # The state is unchanged and publishing is not forced
+                return
+            previous_state_int = self._state
+            self._state = new_state_int
+            changed = True
+
+        if changed:
+            prev_name = self.STATE_INT_TO_NAME.get(previous_state_int, str(previous_state_int))
+            new_name = self.STATE_INT_TO_NAME.get(self._state, str(self._state))
+            pub.sendMessage('log:info', msg=f"[Personality] State changed: {prev_name} ({previous_state_int}) -> {new_name} ({self._state})")

+            # Trigger actions for the new state.
+            # Reset the eye color to the DEFAULT only when waking from sleep or at first initialisation
+            reset_eye_to_default = False
+            default_color = None  # default color (not yet determined)
+
+            # Reset only when the previous state was sleep and the state actually changes,
+            # OR when force_publish is True (which means first initialisation)
+            needs_default_eye = (previous_state_int == self.STATE_SLEEPING and self._state != self.STATE_SLEEPING) or force_publish
+
+            if self._state == self.STATE_SLEEPING:
+                pub.sendMessage('cmd:animate', action="sleep")
+                pub.sendMessage('cmd:led:set_pattern', pattern='off')
+                pub.sendMessage('cmd:servo', servo='all', value=0, absolute=True)
+                pub.sendMessage('state:enter:sleeping')
+                default_color = 'off'
+                if needs_default_eye: reset_eye_to_default = True
+            elif self._state == self.STATE_RESTING:
+                pub.sendMessage('cmd:animate', action="sit")
+                pub.sendMessage('cmd:led:set_pattern', pattern='breathing_blue')
+                pub.sendMessage('cmd:servo', servo='tilt', value=-40, absolute=True)
+                pub.sendMessage('state:enter:resting')
+                default_color = 'blue'
+                if needs_default_eye: reset_eye_to_default = True
+            elif self._state == self.STATE_IDLE:
+                if previous_state_int == self.STATE_SLEEPING or previous_state_int == self.STATE_RESTING:
+                    pub.sendMessage('cmd:animate', action="wake")
+                pub.sendMessage('cmd:animate', action="idle_look")
+                pub.sendMessage('cmd:led:set_pattern', pattern='solid_green')
+                pub.sendMessage('cmd:servo', servo='tilt', value=-20, absolute=True)
+                pub.sendMessage('state:enter:idle')
+                default_color = 'green'
+                if needs_default_eye: reset_eye_to_default = True
+            elif self._state == self.STATE_ALERT:
+                if previous_state_int == self.STATE_SLEEPING or previous_state_int == self.STATE_RESTING:
+                    pub.sendMessage('cmd:animate', action="wake")
+                pub.sendMessage('cmd:animate', action="alert_scan")
+                pub.sendMessage('cmd:led:set_pattern', pattern='fast_blink_red')
+                pub.sendMessage('cmd:servo', servo='tilt', value=0, absolute=True)
+                pub.sendMessage('state:enter:alert')
+                default_color = 'red'
+                if needs_default_eye: reset_eye_to_default = True
+
+            eye_color_changed = False
+            # Only set the eye color to the default when needed
+            if reset_eye_to_default and default_color is not None:
+                if self._eye_color != default_color:
+                    pub.sendMessage('log:debug', msg=f"[Personality] Resetting eye color to default '{default_color}' for state {new_name}.")
+                    self.set_eye(default_color, force_publish=True)  # this will call publish_status itself
+                    eye_color_changed = True
+
+            # If the eye color did not change, publish the state change manually
+            if not eye_color_changed:
+                self.publish_status()
+
+    # --- get_state (returns an integer) ---
+    def get_state(self):
+        """Returns the current state integer."""
+        with self._lock:
+            return self._state
+
+    def set_eye(self, color, force_publish=False):
+        """Sets the eye color and publishes the change."""
+        valid_colors = ['blue', 'red', 'green', 'yellow', 'purple', 'cyan', 'white', 'off']
+        is_valid = color in valid_colors or (isinstance(color, str) and color.startswith('#') and len(color) == 7)
+        if not is_valid:
+            pub.sendMessage('log:warning', msg=f"[Personality] Invalid eye color: {color}")
+            return
+
+        changed = False  # track whether anything actually changed
+        with self._lock:
+            # Exit if the color is unchanged and publishing is not forced
+            if self._eye_color == color and not force_publish:
+                return
+            # Actually store the new color (this assignment was previously missing)
+            self._eye_color = color
+            changed = True
+
+        if changed:
+            pub.sendMessage('log:info', msg=f"[Personality] Eye color set to: {self._eye_color}")
+            # Send the command to the LED module, using the LED module's topic names
+            if color == 'off':
+                pub.sendMessage('led:off')  # use led:off instead of cmd:led:eye_off
+            else:
+                pub.sendMessage('led:eye', color=self._eye_color)  # use led:eye instead of cmd:led:eye_color
+            # Publish the status whenever the eye color changes (the state goes out as a string)
+            self.publish_status()
+
+    # --- get_eye_color (unchanged) ---
+    def get_eye_color(self):
+        """Returns the current eye color."""
+        with self._lock:
+            return self._eye_color
+
+    def publish_status(self):
+        """Publishes the current status of the personality module (sends the state as its string name)."""
+        with self._lock:
+            # Convert the current integer state to its string name
+            current_state_name = self.STATE_INT_TO_NAME.get(self._state, f"UNKNOWN_STATE_{self._state}")
+            status_data = {
+                'state': current_state_name,  # converted to a string for the GUI
+                'eye_color': self._eye_color,
+                # 'current_personality': self._current_personality,  # personality info is no longer sent
+                'timestamp': time.time()
+            }
+
+        # Send the message ('state' inside status_data is now a string)
+        pub.sendMessage('data:robot_status', status=status_data)
+        pub.sendMessage('log:debug', msg=f"[Personality] Published status (state as string): {status_data}")
+
+    # --- Handlers for incoming PubSub events ---
+
+    def process_sentiment(self, score):
+        """Handles sentiment analysis results."""
+        pub.sendMessage('log:info', msg=f"[Personality] Received sentiment score: {score:.2f}")
+        if not self.is_asleep():  # integer comparison
+            if score > 0.5:
+                pub.sendMessage('cmd:animate', action='happy_wiggle')
+                pub.sendMessage('cmd:servo', servo='tilt', value=10, absolute=False)
+            elif score < -0.5:
+                pub.sendMessage('cmd:animate', action='sad_droop')
+                pub.sendMessage('cmd:servo', servo='tilt', value=-10, absolute=False)
+
+    def on_face_detected(self, face_info):
+        """Handles face detection events from the vision module."""
+        name = face_info.get('name', 'Unknown')
+        # priority handling removed
+        pub.sendMessage('log:info', msg=f"[Personality] Face detected: {name}")
+
+        if self.is_asleep() or self.is_resting():
+            self.set_state(self.STATE_IDLE)
+        # Switching to ALERT when a face is detected may make more sense
+        elif self._state != self.STATE_ALERT:
+            self.set_state(self.STATE_ALERT)
+
+        if hasattr(self.behaviours, 'faces') and callable(getattr(self.behaviours.faces, 'process_detection', None)):
+            self.behaviours.faces.process_detection(face_info)
+
+        if name != "Unknown":
+            # priority-person check removed
+            pub.sendMessage('cmd:animate', action='greet')
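+
+    # Example (assumed) event payload shapes, inferred from the .get()/iteration usage
+    # in these handlers:
+    #   'vision:face:detected'   -> face_info = {'name': 'Alice'}
+    #   'vision:motion:detected' -> motion_areas = [(x, y, w, h), ...]
+    #   'vision:object:detected' -> objects = [{'label': 'cup'}, ...]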
-        pub.sendMessage('log', msg="[Personality] State: " + str(state))
-        if state == Config.STATE_SLEEPING:
-            pub.sendMessage("sleep")
-            pub.sendMessage("animate", action="sleep")
-            pub.sendMessage("animate", action="sit")
-            pub.sendMessage("led:off")
-            pub.sendMessage("led", identifiers=['status1'], color="off")
-            pub.sendMessage('piservo:move', angle=0)
-        elif state == Config.STATE_RESTING:
-            pub.sendMessage('rest')
-            pub.sendMessage("animate", action="sit")
-            pub.sendMessage("animate", action="sleep")
-            self.set_eye('blue')
-            pub.sendMessage("led", identifiers=['status1'], color="red")
-            pub.sendMessage('piservo:move', angle=-40)
-        elif state == Config.STATE_IDLE:
-            if self.state == Config.STATE_RESTING or self.state == Config.STATE_SLEEPING:
-                pub.sendMessage('wake')
-            pub.sendMessage('animate', action="wake")
-            pub.sendMessage('animate', action="sit")
-            pub.sendMessage("led", identifiers=['status1'], color="green")
-            pub.sendMessage('piservo:move', angle=-20)
-            self.set_eye('blue')
-        elif state == Config.STATE_ALERT:
-            if self.state == Config.STATE_RESTING or self.state == Config.STATE_SLEEPING:
-                pub.sendMessage('wake')
-            pub.sendMessage('animate', action="wake")
-            # pub.sendMessage('animate', action="stand")
-            pub.sendMessage('piservo:move', angle=0)
-            pub.sendMessage("led", identifiers=['status1'], color="blue")
-        self.state = state
+    def on_motion_detected(self, motion_areas):
+        """Handles motion detection events."""
+        if not motion_areas: return
+        pub.sendMessage('log:info', msg=f"[Personality] Motion detected in {len(motion_areas)} area(s).")
+
+        if self.is_asleep() or self.is_resting():
+            self.set_state(self.STATE_IDLE)
+        elif self._state != self.STATE_ALERT:
+            self.set_state(self.STATE_ALERT)
+
+        if hasattr(self.behaviours, 'motion') and callable(getattr(self.behaviours.motion, 'process_detection', None)):
+            self.behaviours.motion.process_detection(motion_areas)
+
+    def on_object_detected(self, objects):
+        """Handles object detection events."""
+        if not objects: return
+        detected_labels = [obj.get('label', '?') for obj in objects]
+        pub.sendMessage('log:info', msg=f"[Personality] Objects detected: {', '.join(detected_labels)}")
+
+        if self.is_asleep() or self.is_resting():
+            self.set_state(self.STATE_IDLE)
+        elif self._state != self.STATE_ALERT:
+            self.set_state(self.STATE_ALERT)
+
+        if hasattr(self.behaviours, 'objects') and callable(getattr(self.behaviours.objects, 'process_detection', None)):
+            self.behaviours.objects.process_detection(objects)
+
+    # --- Handlers for command requests coming from the GUI ---
+
+    # handle_set_state_cmd (converts string names to integers)
+    def handle_set_state_cmd(self, state):
+        """Handles 'cmd:personality:set_state' pubsub messages. Converts a string name to an int."""
+        pub.sendMessage('log:info', msg=f"[Personality] Received command to set state: {state} (type: {type(state)})")
+        try:
+            target_state_int = None
+            if isinstance(state, int) and state in self.AVAILABLE_STATES:
+                target_state_int = state
+            elif isinstance(state, str):
+                upper_state_name = state.upper()
+                if upper_state_name in self.STATE_NAME_TO_INT:
+                    target_state_int = self.STATE_NAME_TO_INT[upper_state_name]
+                elif state.isdigit():
+                    potential_int = int(state)
+                    if potential_int in self.AVAILABLE_STATES:
+                        target_state_int = potential_int
+
+            if target_state_int is not None:
+                self.set_state(target_state_int, force_publish=True)  # call with force_publish=True
+            else:
+                pub.sendMessage('log:warning', msg=f"[Personality] Invalid or unknown state requested: {state}")
+
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[Personality] Error setting state for value '{state}': {e}\n{traceback.format_exc()}")
+
+    # handle_set_eye_cmd (unchanged)
+    def handle_set_eye_cmd(self, color):
+        """Handles 'cmd:personality:set_eye' pubsub messages."""
+        pub.sendMessage('log:info', msg=f"[Personality] Received command to set eye color: {color}")
+        # Always call with force_publish=True for commands coming from the GUI
+        self.set_eye(color, force_publish=True)
+
+    # handle_get_status_cmd (unchanged; publish_status sends the state as a string)
+    def handle_get_status_cmd(self):
+        """Handles 'cmd:personality:get_status' pubsub messages."""
+        pub.sendMessage('log:info', msg="[Personality] Received command to get status.")
+        self.publish_status()  # sends status_data with the state as a string

    def is_asleep(self):
-        return self.state == Config.STATE_SLEEPING
+        """Checks if the robot is in the SLEEPING state (integer comparison)."""
+        return self.get_state() == self.STATE_SLEEPING  # compares against 0

    def is_resting(self):
-        return self.state == Config.STATE_SLEEPING or self.state == Config.STATE_RESTING
+        """Checks if the robot is in the SLEEPING or RESTING state (integer comparison)."""
+        state_int = self.get_state()
+        return state_int == self.STATE_SLEEPING or state_int == self.STATE_RESTING  # 0 or 1

-    def lt(self, date, compare):
-        return date is None or date < compare
+    def lt(self, date, compare_date):
+        """Checks if 'date' is less than 'compare_date'. Handles None."""
+        if date is None: return True
+        if compare_date is None: return False
+        return date < compare_date

    def past(self, seconds):
-        return datetime.now() - timedelta(seconds=seconds)
\ No newline at end of file
+        """Returns a datetime object representing 'seconds' ago."""
+        return datetime.now() - timedelta(seconds=seconds)
+
+    def stop(self):
+        """Cleanup resources (if any). Called on exit."""
+        pub.sendMessage('log:info', msg="[Personality] Stopping...")
diff --git a/modules/rfid.py b/modules/rfid.py
new file mode 100644
index 00000000..8e70a1c0
--- /dev/null
+++ b/modules/rfid.py
@@ -0,0 +1,138 @@
+from pubsub import pub
+import serial
+import time
+import threading
+from modules.config import Config
+from modules.neopixel.neopx import get_shared_esp32_serial
+import sys
+import os
+
+# Add path to SerialShareManager
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+from modules.SerialShareManager import get_serial
+
+class RFIDReader:
+    def __init__(self, **kwargs):
+        """
+        RFID Reader class
+        :param kwargs: pin, protocol, cards
+        :param pin: GPIO pin number (SDA pin)
+        :param protocol: 'GPIO' (default) or 'ESP32'
+        :param cards: Dictionary of card IDs and names
+
+        Publishes 'rfid:card' when card detected
+        - Arguments: card_id, name
+
+        Example:
+        pub.subscribe(handleCard, 'rfid:card')
+        """
+        self.pin = kwargs.get('pin')
+        self.cards = kwargs.get('cards', {})
+        self.running = True
+        self.serial = None  # Will be set if ESP32 protocol is used
+        self.auto_enable = kwargs.get('auto_enable', False)  # Default to False if not specified
+        self.verbose_logging = kwargs.get('verbose_logging', False)  # verbose logging control
+
+        # Protocol support
+        accepted_protocols = ['GPIO', 'ESP32']
+        self.protocol = kwargs.get('protocol', 'GPIO')
+        if self.protocol not in accepted_protocols:
+            raise ValueError("Invalid protocol specified. Choose one of: " + ", ".join(accepted_protocols))
+
+        # ESP32 protocol handling with improved retry logic
+        if self.protocol == 'ESP32':
+            # Subscribe to both events for better resilience
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_ready')
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_global_ready')
+
+            # Try to get the shared serial connection - first with the new method
+            self.serial = get_serial('rfid')
+            if not self.serial:
+                # Try the old method as a fallback
+                self.serial = get_shared_esp32_serial()
+
+            if self.serial and self.auto_enable:
+                self.enable_rfid_monitoring()
+            else:
+                # Start retry timer with better timing
+                retry_timer = threading.Timer(1.5, self.retry_get_serial)
+                retry_timer.daemon = True
+                retry_timer.start()
+
+            # Subscribe to raw card events
+            pub.subscribe(self.on_card_detected, 'rfid:card_raw')
+        else:
+            # GPIO protocol handling
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[RFID] RFID reader initialized on pin {self.pin} using GPIO protocol')
+            # Add GPIO implementation here if needed
+
+        pub.subscribe(self.cleanup, 'exit')
+
+    def on_serial_ready(self):
+        """Called when the shared serial connection becomes available"""
+        self.serial = get_shared_esp32_serial()
+        if self.serial:
+            if self.verbose_logging:
+                pub.sendMessage('log', msg='[RFID] Connected to shared ESP32 serial')
+            # We'll initialize RFID monitoring after a short delay
+            timer = threading.Timer(0.5, self.enable_rfid_monitoring)
+            timer.daemon = True
+            timer.start()
+
+        # Enable monitoring if auto_enable is true
+        if self.auto_enable and self.serial:
+            self.enable_rfid_monitoring()
+
+    def retry_get_serial(self):
+        """Retry getting the serial connection"""
+        if self.serial is None:
+            # Try the new method first
+            self.serial = get_serial('rfid')
+            if not self.serial:
+                # Fall back to the old method
+                self.serial = get_shared_esp32_serial()
+
+        if self.serial:
+            if self.verbose_logging:
+                pub.sendMessage('log', msg='[RFID] Connected to shared ESP32 serial on retry')
+            self.enable_rfid_monitoring()
+        else:
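+            # 2.0 + (time.time() % 1.0) yields a 2.0-3.0 s delay; the fractional part
+            # adds jitter so multiple modules don't retry in lockstep.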
diff --git a/modules/sensor.py b/modules/sensor.py
index 855171b0..d403bee5 100644
--- a/modules/sensor.py
+++ b/modules/sensor.py
@@ -1,11 +1,24 @@
 from gpiozero import MotionSensor
 from pubsub import pub
+import serial
+import time
+import threading
+from modules.config import Config
+from modules.neopixel.neopx import get_shared_esp32_serial
+import sys
+import os
+
+# Add path to SerialShareManager
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+from modules.SerialShareManager import get_serial
 
 class Sensor:
     def __init__(self, **kwargs):
         """
         Sensor class
-        :param kwargs: pin
+        :param kwargs: pin, protocol
+        :param pin: GPIO pin number
+        :param protocol: 'GPIO' (default) or 'ESP32'
 
         Install: pip install gpiozero
@@ -14,24 +27,135 @@ def __init__(self, **kwargs):
 
         Example:
         pub.subscribe(handleMotion, 'motion')
-
         """
         self.pin = kwargs.get('pin')
         self.value = None
-        self.sensor = MotionSensor(self.pin)
+        self.sensor = None
+        self.running = True
+        self.serial = None  # Will be set if the ESP32 protocol is used
+        self.auto_enable = kwargs.get('auto_enable', False)  # Default to False if not specified
+        self.verbose_logging = kwargs.get('verbose_logging', False)  # Controls logging verbosity
+
+        # Protocol support
+        accepted_protocols = ['GPIO', 'ESP32']
+        self.protocol = kwargs.get('protocol', 'GPIO')
+        if self.protocol not in accepted_protocols:
+            raise ValueError("Invalid protocol specified. Choose one of: " + ", ".join(accepted_protocols))
+
+        # ESP32 protocol handling with improved retry logic
+        if self.protocol == 'ESP32':
+            # Subscribe to both events for better resilience
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_ready')
+            pub.subscribe(self.on_serial_ready, 'esp32:serial_global_ready')
+
+            # Try to get the shared serial connection - first with the new method
+            self.serial = get_serial('motion')
+            if not self.serial:
+                # Try the old method as a fallback
+                self.serial = get_shared_esp32_serial()
+
+            if self.serial and self.auto_enable:
+                self.enable_motion_monitoring()
+            else:
+                # Start a retry timer with better timing
+                retry_timer = threading.Timer(1.0, self.retry_get_serial)
+                retry_timer.daemon = True
+                retry_timer.start()
+
+            # Subscribe to motion events from the shared processor
+            pub.subscribe(self.on_motion_detected, 'motion:detected')
+        else:
+            # GPIO protocol handling
+            self.sensor = MotionSensor(self.pin)
+            if self.verbose_logging:
+                pub.sendMessage('log', msg=f'[MOTION] Motion sensor initialized on pin {self.pin} using GPIO protocol')
+            pub.subscribe(self.loop, 'loop:1')
+        pub.subscribe(self.cleanup, 'exit')
+
+    def on_serial_ready(self):
+        """Called when the shared serial connection becomes available"""
+        self.serial = get_shared_esp32_serial()
+        if self.serial:
+            # Initialize motion monitoring after a short delay
+            timer = threading.Timer(0.5, self.enable_motion_monitoring)
+            timer.daemon = True
+            timer.start()
+
+            # Enable monitoring if auto_enable is true
+            if self.auto_enable and self.serial:
+                self.enable_motion_monitoring()
+
+    def retry_get_serial(self):
+        """Retry getting the serial connection"""
+        if self.serial is None:
+            # Try the new method first
+            self.serial = get_serial('motion')
+            if not self.serial:
+                # Fall back to the old method
+                self.serial = get_shared_esp32_serial()
+
+            if self.serial:
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg='[MOTION] Connected to shared ESP32 serial on retry')
+                self.enable_motion_monitoring()
+            else:
+                # Schedule another retry with a slightly randomized backoff,
+                # so several modules don't retry in lockstep
+                delay = 2.0 + (time.time() % 1.5)
+                retry_timer = threading.Timer(delay, self.retry_get_serial)
+                retry_timer.daemon = True
+                retry_timer.start()
+
+    def enable_motion_monitoring(self):
+        """Enable motion monitoring on the ESP32"""
+        if self.protocol == 'ESP32':
+            if self.serial is None:
+                self.serial = get_shared_esp32_serial()
+                if self.serial is None:
+                    return
+
+            # Send command to enable motion monitoring
+            cmd = "MONITOR_MOTION 1\n"
+            try:
+                self.serial.write(cmd.encode())
+                if self.verbose_logging:
+                    pub.sendMessage('log', msg=f'[MOTION] Monitoring enabled for pin {self.pin}')
+            except Exception as e:
+                pub.sendMessage('log', msg=f'[MOTION] ERROR: Failed to enable motion monitoring: {e}')
+                self.serial = None  # Reset serial connection to trigger reconnect
+
+    def on_motion_detected(self, pin):
+        """
+        Handle motion detection events from the shared serial processor.
+        Only trigger if the pin matches our configured pin.
+        """
+        if pin == self.pin:
+            pub.sendMessage('motion')
+            # Motion detections are always logged - this is a significant event
+            pub.sendMessage('log', msg=f'[MOTION] Motion detected on pin {self.pin}')
 
     def loop(self):
-        if self.read():
+        """
+        Called on the loop:1 event
+        """
+        if self.protocol == 'GPIO' and self.read():
             pub.sendMessage('motion')
+            # Log motion detections when using the GPIO protocol
+            pub.sendMessage('log', msg=f'[MOTION] Motion detected on pin {self.pin}')
 
     def read(self):
-        self.value = self.sensor.motion_detected
-        return self.value
-
-    # def watch(self, edge, callback):
-    #     """
-    #     :param edge: pigpio.EITHER_EDGE, pigpio.FALLING_EDGE, pigpio.RISING_EDGE
-    #     :param callback: method to call when change detected
-    #     """
-    #     return self.pi.callback(self.pin, edge, callback)
+        """
+        Read motion sensor value
+        :return: True if motion detected, False otherwise
+        """
+        if self.protocol == 'GPIO' and self.sensor:
+            self.value = self.sensor.motion_detected
+            return self.value
+        return False
+
+    def cleanup(self):
+        """
+        Cleanup resources
+        """
+        self.running = False
+        # Nothing to clean up for the ESP32 protocol as we're using the shared serial
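+
+# Usage sketch (hypothetical pin and wiring): subscribers receive the bare
+# 'motion' event regardless of whether the GPIO or ESP32 protocol produced it.
+#
+#   from pubsub import pub
+#   from modules.sensor import Sensor
+#
+#   def handle_motion():
+#       print("Motion!")
+#
+#   pub.subscribe(handle_motion, 'motion')
+#   sensor = Sensor(pin=13, protocol='GPIO')  # pin value is illustrative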
diff --git a/modules/archived/opencv/faces.py b/modules/vision/opencv/faces.py
similarity index 100%
rename from modules/archived/opencv/faces.py
rename to modules/vision/opencv/faces.py
diff --git a/modules/archived/opencv/haarcascade_frontalface_default.xml b/modules/vision/opencv/haarcascade_frontalface_default.xml
similarity index 100%
rename from modules/archived/opencv/haarcascade_frontalface_default.xml
rename to modules/vision/opencv/haarcascade_frontalface_default.xml
diff --git a/modules/archived/opencv/timelapse.py b/modules/vision/opencv/timelapse.py
similarity index 100%
rename from modules/archived/opencv/timelapse.py
rename to modules/vision/opencv/timelapse.py
diff --git a/modules/archived/opencv/tracking.py b/modules/vision/opencv/tracking.py
similarity index 100%
rename from modules/archived/opencv/tracking.py
rename to modules/vision/opencv/tracking.py
diff --git a/modules/archived/opencv/train_model.py b/modules/vision/opencv/train_model.py
similarity index 100%
rename from modules/archived/opencv/train_model.py
rename to modules/vision/opencv/train_model.py
diff --git a/modules/archived/opencv/video_stream.py b/modules/vision/opencv/video_stream.py
similarity index 100%
rename from modules/archived/opencv/video_stream.py
rename to modules/vision/opencv/video_stream.py
diff --git a/modules/archived/opencv/vision.py b/modules/vision/opencv/vision.py
similarity index 100%
rename from modules/archived/opencv/vision.py
rename to modules/vision/opencv/vision.py
diff --git a/modules/vision/remote/__init__.py b/modules/vision/remote/__init__.py
new file mode 100644
index 00000000..c546670d
--- /dev/null
+++ b/modules/vision/remote/__init__.py
@@ -0,0 +1,12 @@
+# Module initialization file for remote vision system
+# This ensures proper component loading with GStreamer support
+
+import os
+import platform
+
+# Enable GStreamer support by default on ARM platforms (Raspberry Pi)
+if 'arm' in platform.machine():
+    os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0"
+    os.environ["OPENCV_VIDEOIO_PRIORITY_V4L"] = "1"
+    os.environ["OPENCV_VIDEOIO_PRIORITY_V4L2"] = "1"
+    os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "1"  # Enable GStreamer support
diff --git a/modules/vision/remote/faces.py b/modules/vision/remote/faces.py
new file mode 100644
index 00000000..9d374190
--- /dev/null
+++ b/modules/vision/remote/faces.py
@@ -0,0 +1,242 @@
+import face_recognition
+import pickle
+import cv2
+import time
+from collections import Counter
+import shutil
+from pubsub import pub
+from datetime import datetime, timedelta
+import os
+import json
+
+class Faces:
+    UNKNOWN_LABEL = 'Unknown'
+    MATCH_THRESHOLD_PERCENT = 40
+
+    def __init__(self, **kwargs):
+        self.path = kwargs.get('path', '/')
+        encodingsP = self.path + "/encodings.pickle"
+        cascade = self.path + "/haarcascade_frontalface_default.xml"
+        self.last_face = None
+        self.last_save = None
+
+        # Check whether the encodings file exists
+        if os.path.exists(encodingsP):
+            self.data = pickle.loads(open(encodingsP, "rb").read())
+            self.faceCounts = Counter(self.data['names'])
+        else:
+            pub.sendMessage('log:warning', msg=f"[Faces] Encodings file not found: {encodingsP}")
+            self.data = {"encodings": [], "names": []}
+            self.faceCounts = Counter()
+
+        # Check whether the cascade file exists
+        if os.path.exists(cascade):
+            self.detector = kwargs.get('detector', cv2.CascadeClassifier(cascade))
+        else:
+            pub.sendMessage('log:warning', msg=f"[Faces] Cascade file not found: {cascade}")
+            # Fall back to OpenCV's bundled cascade file
+            self.detector = kwargs.get('detector', cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'))
+
+        pub.subscribe(self.nomatch, 'vision:nomatch')
+        self.state = kwargs.get('state')  # the personality instance
+        self.last_face = None
+        self.last_face_name = None
+        self.current_faces = []
+        self.face_detected = None
+
+        # State for priority people and their animations
+        self.priority_animations = {}  # Animations keyed by priority person
+        self.priority_file = os.path.join(self.path, "priority_animations.json")  # Configuration file
+
+        # Subscriptions
+        pub.subscribe(self.face, 'vision:detect:face')
+        pub.subscribe(self.noface, 'vision:nomatch')
+        pub.subscribe(self.update_priority_animation, 'priority:update_animation')
+        pub.subscribe(self.add_priority_person, 'priority:add_person')
+        pub.subscribe(self.remove_priority_person, 'priority:remove_person')
+        pub.subscribe(self.handle_priority_person_detected, 'priority:person_detected')
+
+        # Load saved priority people
+        self.load_priority_animations()
+
+        # Note in the log that this class is active
+        pub.sendMessage('log:info', msg=f"[RemoteFaces] Initialized with {len(self.priority_animations)} priority animations")
+
+    def load_priority_animations(self):
+        """Load saved priority-person animations"""
+        try:
+            if os.path.exists(self.priority_file):
+                with open(self.priority_file, 'r') as f:
+                    self.priority_animations = json.load(f)
+                pub.sendMessage('log:info', msg=f"[RemoteFaces] Loaded {len(self.priority_animations)} priority animations")
+            else:
+                pub.sendMessage('log:info', msg=f"[RemoteFaces] No priority file found at {self.priority_file}")
+                self.priority_animations = {}
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[RemoteFaces] Error loading priority animations: {e}")
+            self.priority_animations = {}
+
+    def add_priority_person(self, person_name):
+        """Add a priority person"""
+        if person_name not in self.priority_animations:
+            self.priority_animations[person_name] = "RAINBOW_CYCLE"  # Default animation
+            pub.sendMessage('log:info', msg=f"[RemoteFaces] Added priority person: {person_name}")
+            self.save_priority_animations()
+            return True
+        return False
+
+    def remove_priority_person(self, person_name):
+        """Remove a priority person"""
+        if person_name in self.priority_animations:
+            del self.priority_animations[person_name]
+            pub.sendMessage('log:info', msg=f"[RemoteFaces] Removed priority person: {person_name}")
+            self.save_priority_animations()
+            return True
+        return False
+
+    def handle_priority_person_detected(self, person_name, animation_name="RAINBOW_CYCLE"):
+        """Called when a priority person is detected"""
+        pub.sendMessage('log:info', msg=f"[RemoteFaces] Priority person detected: {person_name}, animation: {animation_name}")
+
+        # Check whether the person is already in the priority list; add them if not
+        if person_name not in self.priority_animations:
+            self.add_priority_person(person_name)
+            # Set the animation after adding
+            self.update_priority_animation(person_name, animation_name)
+
+        # Run the animation
+        pub.sendMessage('led:animate', animation=animation_name, color="GREEN", repeat=2)
+
+        # Greet them (optional)
+        greeting = f"Merhaba {person_name}"
+        pub.sendMessage('tts', msg=greeting)
+
+    def save_priority_animations(self):
+        """Persist priority-person animations"""
+        try:
+            # Create the directory if it doesn't exist
+            os.makedirs(os.path.dirname(self.priority_file), exist_ok=True)
+
+            with open(self.priority_file, 'w') as f:
+                json.dump(self.priority_animations, f, indent=2)
+            pub.sendMessage('log:info', msg=f"[RemoteFaces] Saved {len(self.priority_animations)} priority animations")
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[RemoteFaces] Error saving priority animations: {e}")
+
+    def update_priority_animation(self, person_name, animation_name):
+        """Update a person/animation mapping (e.g. from the GUI)"""
+        self.priority_animations[person_name] = animation_name
+        pub.sendMessage('log:info', msg=f"[RemoteFaces] Updated animation for {person_name}: {animation_name}")
+        self.save_priority_animations()  # Persist the change
+
+    def nomatch(self):
+        self.last_face = None
+
+    def noface(self):
+        """Called when no face is detected"""
+        if self.face_detected:
+            pub.sendMessage('log:info', msg='[RemoteFaces] No face matches found')
+            self.face_detected = False
+
+            # If the personality module is present, change the eye colour
+            if self.state and hasattr(self.state, 'set_eye'):
+                self.state.set_eye('blue')
+
+    def detect(self, rgb, matches, final_match):
+        """Match faces using the face-recognition algorithm"""
+        if rgb is None or matches is None:
+            raise Exception('Inputs not found')
+
+        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in matches]
+        encodings = face_recognition.face_encodings(rgb, boxes)
+        names = []
+
+        for encoding in encodings:
+            matched_faces = face_recognition.compare_faces(self.data["encodings"], encoding)
+            name = Faces.UNKNOWN_LABEL
+
+            if True in matched_faces:
+                matchedIdxs = [i for (i, b) in enumerate(matched_faces) if b]
+                counts = {}
+
+                for i in matchedIdxs:
+                    nm = self.data["names"][i]
+                    counts[nm] = counts.get(nm, 0) + 1
+
+                biggestHit = max(counts, key=counts.get)
+                # Threshold check - mark as unknown if there aren't enough matches
+                if counts[biggestHit] > (self.faceCounts[biggestHit] / 100 * Faces.MATCH_THRESHOLD_PERCENT):
+                    name = biggestHit
+
+            # Fire the event when a new person is detected
+            if self.last_face != name:
+                self.last_face = name
+                # Include the priority status for use in the RemoteVision module
+                is_priority = name in self.priority_animations
+                if is_priority:
+                    animation = self.priority_animations.get(name, "RAINBOW_CYCLE")
+                    pub.sendMessage('log:info', msg=f'[RemoteFaces] Detecting priority person: {name} (animation: {animation})')
+
+                # Publish the face-detection event
+                pub.sendMessage('vision:detect:face', name=name)
+
+            # Add the name to the list
+            names.append(name)
+
+        # If a face was recognised, save the frame periodically
+        if self.last_save is None or self.last_save < time.time() - 5:
+            if final_match:
+                self.last_save = time.time()
+
+                for name in names:
+                    try:
+                        # Create the directory (if missing)
+                        match_dir = os.path.join(self.path, 'matches', name)
+                        os.makedirs(match_dir, exist_ok=True)
+
+                        # Save the frame
+                        cv2.imwrite(os.path.join(match_dir, f"{int(time.time() * 1000)}.jpg"), rgb)
+                    except Exception as e:
+                        pub.sendMessage('log:error', msg=f"[RemoteFaces] Error saving match image: {e}")
+
+        return names
+
+    def face(self, name):
+        """Triggered when a face is detected (by the GUI or the vision:detect:face event)"""
+        if not self.face_detected:
+            pub.sendMessage('log:info', msg=f'[RemoteFaces] Face detected: {name}')
+
+        self.face_detected = True
+        self.last_face = datetime.now()
+
+        # Turn the eyes green (if the personality module is present)
+        if self.state and hasattr(self.state, 'set_eye'):
+            self.state.set_eye('green')
+
+        # Check for a priority-person animation
+        is_priority = name in self.priority_animations
+        if is_priority:
+            animation_name = self.priority_animations[name]
+            pub.sendMessage('log:info', msg=f'[RemoteFaces] Running priority animation for {name}: {animation_name}')
+
+            # Run the animation
+            pub.sendMessage('led:animate', animation=animation_name, color="GREEN", repeat=2)
+
+            # Also send the tracking command for the servos
+            pub.sendMessage('priority_person:detected', name=name)
+
+        # If this is a newly seen person, greet them
+        if name not in self.current_faces:
+            self.current_faces.append(name)
+
+            # Default animation for non-priority people
+            if not is_priority:
+                pub.sendMessage('led:animate', animation='SPINNER', color='BLUE', repeat=1)
+
+            # Greeting
+            if name.lower() != 'unknown':
+                greeting = f"Merhaba {name}"
+                pub.sendMessage('tts', msg=greeting)
+
+        if name.lower() != 'unknown':
+            self.last_face_name = name
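+
+# Usage sketch: the priority list is managed entirely over pubsub, so a GUI
+# or script can drive it without touching this class directly (names below
+# are illustrative only):
+#
+#   pub.sendMessage('priority:add_person', person_name='Alice')
+#   pub.sendMessage('priority:update_animation',
+#                   person_name='Alice', animation_name='RAINBOW_CYCLE')
+#   pub.sendMessage('priority:remove_person', person_name='Alice')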
diff --git a/modules/vision/remote/network/camera_streamer.py b/modules/vision/remote/network/camera_streamer.py
new file mode 100644
index 00000000..8ada35c9
--- /dev/null
+++ b/modules/vision/remote/network/camera_streamer.py
@@ -0,0 +1,261 @@
+import io
+import logging
+import socketserver
+from http import server
+from threading import Condition, Thread
+import asyncio
+import time
+from pubsub import pub
+import socket
+import os
+import gc
+import cv2
+import numpy as np
+
+class StreamingOutput:
+    def __init__(self):
+        self.frame = None
+        self.condition = Condition()
+
+    def update_frame(self, frame):
+        with self.condition:
+            self.frame = frame
+            self.condition.notify_all()
+
+class CameraStreamer:
+    def __init__(self, **kwargs):
+        self.port = kwargs.get('port', 8000)
+        self.resolution = kwargs.get('resolution', (1536, 864))
+        self.flip_horizontally = kwargs.get('flip_horizontally', False)
+        self.flip_vertically = kwargs.get('flip_vertically', True)  # Vertical flip is enabled by default
+        self.rotate_180 = kwargs.get('rotate_180', False)
+        self.fps = kwargs.get('fps', 120)
+        self.frame_interval = 1.0 / self.fps
+        self.quality = kwargs.get('quality', 80)
+        self.verbose_logging = kwargs.get('verbose_logging', False)
+        self.running = False
+        self.output = StreamingOutput()
+        self.picam = None
+        self.http_server = None
+
+        self.server_ip = self.get_local_ip()
+        pub.subscribe(self.stop_streaming, 'exit')
+        pub.subscribe(self.stop_streaming, 'camera:stop_streaming')
+        pub.subscribe(self.start_streaming, 'camera:start_streaming')
+
+        if kwargs.get('autostart', True):
+            Thread(target=self.start_streaming).start()
+
+    def get_local_ip(self):
+        try:
+            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+            s.connect(("8.8.8.8", 80))
+            ip = s.getsockname()[0]
+            s.close()
+            return ip
+        except Exception:
+            return "127.0.0.1"
+
+    async def capture_frames(self):
+        from picamera2 import Picamera2
+        from libcamera import Transform
+
+        try:
+            self.picam = Picamera2()
+
+            # Create an appropriate transform configuration
+            transform = None
+            if self.flip_horizontally or self.flip_vertically or self.rotate_180:
+                # Compute the parameters needed for the transform
+                hflip = 1 if self.flip_horizontally else 0
+                vflip = 1 if self.flip_vertically else 0
+
+                # If rotate_180 is true, flip both horizontally and vertically
+                if self.rotate_180:
+                    hflip = 1 - hflip  # Toggle current hflip value
+                    vflip = 1 - vflip  # Toggle current vflip value
+
+                transform = Transform(hflip=hflip, vflip=vflip)
+                pub.sendMessage('log', msg=f"[CameraStreamer] Camera transform: hflip={hflip}, vflip={vflip}")
+
+            config = self.picam.create_preview_configuration(
+                main={"size": self.resolution, "format": "RGB888"}, transform=transform
+            )
+            self.picam.configure(config)
+            self.picam.start()
+
+            pub.sendMessage('log', msg=f"[CameraStreamer] Camera started with resolution {self.resolution}")
+
+            frame_count = 0
+            last_log_time = time.time()
+
+            while self.running:
+                start_time = time.time()
+                frame = self.picam.capture_array()
+
+                # Lightweight throughput diagnostics
+                frame_count += 1
+                if frame_count % 5 == 0:  # Check only every 5th frame for diagnostics
+                    current_time = time.time()
+                    if current_time - last_log_time >= 10:  # Log every 10 seconds
+                        fps = frame_count / (current_time - last_log_time)
+                        pub.sendMessage('log', msg=f"[CameraStreamer] Streaming at {fps:.1f} FPS")
+                        frame_count = 0
+                        last_log_time = current_time
+
+                # Optimize quality based on network conditions
+                quality = self.quality
+                if hasattr(self, 'adaptive_quality') and self.adaptive_quality:
+                    # Adjust quality based on transmission time
+                    if hasattr(self, 'last_transmit_time') and self.last_transmit_time > 0.05:
+                        quality = max(40, quality - 10)  # Reduce quality if the network is slow
+                    elif hasattr(self, 'last_transmit_time') and self.last_transmit_time < 0.01:
+                        quality = min(90, quality + 5)  # Increase quality if the network is fast
+
+                # Encode frame as JPEG
+                try:
+                    _, jpeg_data = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, quality])
+                    encode_time = time.time() - start_time
+
+                    # Update the output with the new frame
+                    transmit_start = time.time()
+                    self.output.update_frame(jpeg_data.tobytes())
+                    self.last_transmit_time = time.time() - transmit_start
+
+                    total_time = time.time() - start_time
+
+                    # Sleep just long enough to maintain the configured frame rate
+                    sleep_time = max(0, self.frame_interval - total_time)
+                    await asyncio.sleep(sleep_time)
+
+                except Exception as e:
+                    pub.sendMessage('log:error', msg=f"[CameraStreamer] Frame encoding error: {e}")
+                    await asyncio.sleep(0.1)  # Brief delay before retrying
+
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[CameraStreamer] Error in capture_frames: {e}")
+            self.running = False
+            if self.picam:
+                self.picam.stop()
+                self.picam = None
+
+    def start_streaming(self):
+        if self.running:
+            pub.sendMessage('log', msg="[CameraStreamer] Already streaming")
+            return
+
+        self.running = True
+        pub.sendMessage('log', msg=f"[CameraStreamer] Starting stream on port {self.port}")
+        Thread(target=self.start_http_server, daemon=True).start()
+
+        try:
+            asyncio.run(self.capture_frames())
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[CameraStreamer] Error in asyncio: {e}")
+
+    def start_http_server(self):
+        class StreamingHandler(server.BaseHTTPRequestHandler):
+            def do_GET(self):
+                if self.path == '/':
+                    self.send_response(301)
+                    self.send_header('Location', '/index.html')
+                    self.end_headers()
+                elif self.path == '/index.html':
+                    content = f"""
+                    <html>
+                    <head><title>SentryBOT Camera Stream</title></head>
+                    <body>
+                    <h1>SentryBOT Camera Stream</h1>
+                    <img src="/stream.mjpg" alt="Camera Stream" />
+                    <p>Resolution: {self.server.resolution[0]}x{self.server.resolution[1]} - FPS: {self.server.fps}</p>
+                    <p>Server IP: {self.server.server_ip}</p>
+                    </body>
+                    </html>
+ + + """ + content_bytes = content.encode('utf-8') + self.send_response(200) + self.send_header('Content-Type', 'text/html') + self.send_header('Content-Length', len(content_bytes)) + self.end_headers() + self.wfile.write(content_bytes) + elif self.path == '/stream.mjpg': + self.send_response(200) + self.send_header('Age', 0) + self.send_header('Cache-Control', 'no-cache, private') + self.send_header('Pragma', 'no-cache') + self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME') + self.end_headers() + try: + while True: + with self.server.output.condition: + self.server.output.condition.wait() + frame = self.server.output.frame + self.wfile.write(b'--FRAME\r\n') + self.send_header('Content-Type', 'image/jpeg') + self.send_header('Content-Length', len(frame)) + self.end_headers() + self.wfile.write(frame) + self.wfile.write(b'\r\n') + except Exception as e: + if self.server.verbose_logging: + pub.sendMessage('log', msg=f"[CameraStreamer] Streaming error: {e}") + else: + self.send_error(404) + self.end_headers() + + def log_message(self, format, *args): + if self.server.verbose_logging: + pub.sendMessage('log', msg=f"[CameraStreamer] {format % args}") + + class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer): + allow_reuse_address = True + daemon_threads = True + output = self.output + verbose_logging = self.verbose_logging + resolution = self.resolution + fps = self.fps + server_ip = self.server_ip + + try: + address = ('', self.port) + self.http_server = StreamingServer(address, StreamingHandler) + pub.sendMessage('log', msg=f"[CameraStreamer] HTTP server running at http://{self.server_ip}:{self.port}/") + pub.sendMessage('log', msg=f"[CameraStreamer] MJPEG stream available at http://{self.server_ip}:{self.port}/stream.mjpg") + self.http_server.serve_forever() + except Exception as e: + pub.sendMessage('log:error', msg=f"[CameraStreamer] HTTP server error: {e}") + self.running = False + + def stop_streaming(self): + if not self.running: + return + + self.running = False + pub.sendMessage('log', msg="[CameraStreamer] Stopping stream") + + if self.picam: + try: + self.picam.stop() + except Exception as e: + pub.sendMessage('log:error', msg=f"[CameraStreamer] Error stopping camera: {e}") + self.picam = None + + if self.http_server: + try: + self.http_server.shutdown() + except Exception as e: + pub.sendMessage('log:error', msg=f"[CameraStreamer] Error shutting down HTTP server: {e}") + self.http_server = None + + gc.collect() + pub.sendMessage('log', msg="[CameraStreamer] Stream stopped") diff --git a/modules/vision/remote/network/command_receiver.py b/modules/vision/remote/network/command_receiver.py new file mode 100644 index 00000000..c2548be8 --- /dev/null +++ b/modules/vision/remote/network/command_receiver.py @@ -0,0 +1,819 @@ +import socket + +import json + +import threading + +import time + +import sys + +from pubsub import pub + +import traceback # Hata ayıklama için + +import logging # Use standard logging if available + +PI_SERVO_ANIMATIONS = { + "BOUNCE", "CELEBRATE", "HEAD_LEFT", "HEAD_NOD", "HEAD_NOD_ABS", + "HEAD_RIGHT", "HEAD_SHAKE", "HEAD_SHAKE_ABS", "LOOK_DOWN", "LOOK_UP", + "RAISED", "SCAN", "SIT", "SLEEP" +} + + + +class CommandReceiver: + + def __init__(self, **kwargs): + + """ + + Receives commands from a remote computer (GUI) via socket and publishes + + corresponding pubsub events for other robot modules to handle. 
diff --git a/modules/vision/remote/network/command_receiver.py b/modules/vision/remote/network/command_receiver.py
new file mode 100644
index 00000000..c2548be8
--- /dev/null
+++ b/modules/vision/remote/network/command_receiver.py
@@ -0,0 +1,819 @@
+import socket
+import json
+import threading
+import time
+import sys
+from pubsub import pub
+import traceback  # For debugging
+import logging  # Use standard logging if available
+
+PI_SERVO_ANIMATIONS = {
+    "BOUNCE", "CELEBRATE", "HEAD_LEFT", "HEAD_NOD", "HEAD_NOD_ABS",
+    "HEAD_RIGHT", "HEAD_SHAKE", "HEAD_SHAKE_ABS", "LOOK_DOWN", "LOOK_UP",
+    "RAISED", "SCAN", "SIT", "SLEEP"
+}
+
+class CommandReceiver:
+    def __init__(self, **kwargs):
+        """
+        Receives commands from a remote computer (GUI) via socket and publishes
+        corresponding pubsub events for other robot modules to handle.
+
+        Configuration kwargs:
+        - port: Port number for command server (default: 8090)
+        - reuse_port: Allow port reuse (default: True, only effective on Linux)
+        - autostart: Start the server automatically (default: True)
+        - max_retries: Max attempts to bind the port (default: 5)
+        - retry_delay: Delay between retry attempts in seconds (default: 1)
+        """
+        self.port = kwargs.get('port', 8090)
+        self.reuse_port = kwargs.get('reuse_port', True)
+        self.max_retries = kwargs.get('max_retries', 5)
+        self.retry_delay = kwargs.get('retry_delay', 1)
+        self.running = False
+        self.server_socket = None
+        self._client_threads = []  # Track active client threads
+        self._lock = threading.Lock()  # For thread safety
+
+        self.handlers = {
+            'send_animation': self.handle_send_animation,
+            'motion_event': self.handle_motion_event,
+            'set_processing_mode': self.handle_processing_mode,
+            'log': self.handle_log,
+            'get_status': self.handle_get_status,
+            'set_state': self.handle_set_state,
+            'set_eye': self.handle_set_eye,
+            'servo_move': self.handle_servo_move,
+        }
+
+        pub.subscribe(self.stop_receiver, 'exit')  # Stop when the application exits
+
+        if kwargs.get('autostart', True):
+            # Start the server in a separate thread
+            thread = threading.Thread(target=self.start_receiver, daemon=True)
+            thread.start()
+
+    def start_receiver(self):
+        """Start the command receiver server."""
+        if self.running:
+            pub.sendMessage('log:info', msg=f"[CommandReceiver] Already running on port {self.port}")
+            return
+
+        self.running = True
+        pub.sendMessage('log:info', msg=f"[CommandReceiver] Starting command receiver on port {self.port}")
+
+        for attempt in range(self.max_retries):
+            try:
+                self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                # Address/port reuse settings
+                self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                if self.reuse_port and hasattr(socket, 'SO_REUSEPORT'):
+                    try:
+                        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+                    except OSError:
+                        pub.sendMessage('log:warning', msg="[CommandReceiver] SO_REUSEPORT not supported or failed.")
+
+                # Timeout so that accept() does not block indefinitely
+                self.server_socket.settimeout(1.0)
+                self.server_socket.bind(('0.0.0.0', self.port))
+                self.server_socket.listen(5)
+                pub.sendMessage('log:info', msg=f"[CommandReceiver] Listening for commands on 0.0.0.0:{self.port}")
+
+                # On success, start the connection-accepting loop
+                self._accept_connections()
+                return  # Returns when _accept_connections finishes or errors out
+
+            except OSError as e:
+                if self.server_socket:
+                    self.server_socket.close()
+                    self.server_socket = None
+
+                if e.errno == 98:  # Address already in use
+                    if attempt < self.max_retries - 1:
+                        wait_time = self.retry_delay * (attempt + 1)
+                        pub.sendMessage('log:warning', msg=f"[CommandReceiver] Port {self.port} in use. Retrying in {wait_time}s... (Attempt {attempt + 1}/{self.max_retries})")
+                        time.sleep(wait_time)
+                    else:
+                        pub.sendMessage('log:error', msg=f"[CommandReceiver] Port {self.port} still in use after {self.max_retries} attempts. Failed to start.")
+                        self.running = False
+                        return
+                else:
+                    pub.sendMessage('log:error', msg=f"[CommandReceiver] Socket OS Error on startup (bind/listen): {e}")
+                    self.running = False
+                    return
+            except Exception as e:
+                if self.server_socket:
+                    self.server_socket.close()
+                    self.server_socket = None
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error on startup: {e}")
+                pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+                self.running = False
+                return
+
+        if not self.running:
+            pub.sendMessage('log:error', msg="[CommandReceiver] Failed to start after all attempts.")
+
+    def _accept_connections(self):
+        """Accept incoming connections and handle clients in separate threads."""
+        while self.running:
+            client_socket = None
+            try:
+                # Exit if the server socket is gone or has been closed
+                if not self.server_socket:
+                    pub.sendMessage('log:info', msg="[CommandReceiver] Server socket is closed. Stopping accept loop.")
+                    break
+
+                # accept() runs with a 1-second timeout
+                client_socket, addr = self.server_socket.accept()
+                pub.sendMessage('log:info', msg=f"[CommandReceiver] Connection accepted from {addr}")
+
+                # KeepAlive settings (optional, to detect dropped connections)
+                try:
+                    client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+                    # More detailed settings on Linux
+                    if hasattr(socket, "TCP_KEEPIDLE"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
+                    if hasattr(socket, "TCP_KEEPINTVL"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10)
+                    if hasattr(socket, "TCP_KEEPCNT"): client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
+                except OSError as ke_err:
+                    pub.sendMessage('log:warning', msg=f"[CommandReceiver] Could not set KeepAlive options for {addr}: {ke_err}")
+
+                # Start a separate thread for each client
+                client_thread = threading.Thread(target=self._handle_client, args=(client_socket, addr), daemon=True)
+                client_thread.start()
+                with self._lock:
+                    self._client_threads.append(client_thread)
+                # Clean up finished threads (optional)
+                self._cleanup_threads()
+
+            except socket.timeout:
+                # Timeouts are expected; loop again and re-check self.running
+                continue
+            except OSError as e:
+                # The socket was closed (stop_receiver) or another OS error occurred
+                if self.running:  # Only log if we were not deliberately stopped
+                    pub.sendMessage('log:error', msg=f"[CommandReceiver] OS Error during accept(): {e}")
+                break  # Leave the loop on error
+            except Exception as e:
+                if self.running:
+                    pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error accepting connection: {e}")
+                    pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+                if client_socket: client_socket.close()  # Close the socket if an error occurred
+                break  # Leave the loop on unexpected errors
+
+        pub.sendMessage('log:info', msg="[CommandReceiver] Accept loop finished.")
+        self.running = False  # Update state once the loop ends
+
+    def _handle_client(self, client_socket, addr):
+        """Handle communication with a single connected client."""
+        buffer_size = 4096
+        client_socket.settimeout(60.0)  # Wait up to 60 seconds for data from the client
+        full_data = b""
+        separator = b'\n'  # Newline as the command separator
+
+        pub.sendMessage('log:debug', msg=f"[CommandReceiver] Handling client {addr}")
+
+        try:
+            while self.running:
+                try:
+                    chunk = client_socket.recv(buffer_size)
+                    if not chunk:
+                        pub.sendMessage('log:info', msg=f"[CommandReceiver] Client {addr} disconnected.")
+                        break  # The client closed the connection
+
+                    full_data += chunk
+
+                    # Process complete commands (separated by newlines)
+                    while separator in full_data:
+                        command_part, full_data = full_data.split(separator, 1)
+                        if command_part:
+                            self._process_command(command_part, client_socket, addr)
+
+                except socket.timeout:
+                    # On timeout, probe the connection by sending zero bytes
+                    try:
+                        client_socket.send(b'')
+                    except OSError:
+                        pub.sendMessage('log:warning', msg=f"[CommandReceiver] Client {addr} connection lost after timeout check.")
+                        break
+                    continue  # Timeouts are normal; keep listening
+                except ConnectionResetError:
+                    pub.sendMessage('log:warning', msg=f"[CommandReceiver] Connection reset by client {addr}.")
+                    break
+                except OSError as e:
+                    # Socket error (e.g. reading from or writing to a closed socket)
+                    if self.running:
+                        pub.sendMessage('log:error', msg=f"[CommandReceiver] Socket OS Error with client {addr}: {e}")
+                    break
+                except Exception as e:
+                    if self.running:
+                        pub.sendMessage('log:error', msg=f"[CommandReceiver] Unexpected error receiving from {addr}: {e}")
+                        pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+                    break  # Leave on any other error
+
+        except Exception as e:
+            # Unexpected error at the thread level
+            if self.running:
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] Error in client handler thread for {addr}: {e}")
+                pub.sendMessage('log:debug', msg=f"Traceback: {traceback.format_exc()}")
+        finally:
+            pub.sendMessage('log:info', msg=f"[CommandReceiver] Closing connection with {addr}")
+            try:
+                client_socket.shutdown(socket.SHUT_RDWR)
+            except OSError:
+                pass  # Ignore if already closed or erroring
+            except Exception as shut_e:
+                pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error during socket shutdown for {addr}: {shut_e}")
+            try:
+                client_socket.close()
+            except Exception as close_e:
+                pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error during socket close for {addr}: {close_e}")
+
+    def _process_command(self, data, client_socket, addr):
+        """Parse the received data as JSON and publish corresponding pubsub events."""
+        response_data = {'status': 'error', 'message': 'Unknown processing error'}
+        command_type = "unknown"
+        params = {}
+
+        try:
+            decoded_data = data.decode('utf-8').strip()
+            if not decoded_data:
+                return
+
+            command_data = json.loads(decoded_data)
+            command_type = command_data.get('command', 'unknown')
+            params = command_data.get('params', {})
+
+            pub.sendMessage('log:debug', msg=f"[CommandReceiver] DEBUG: Received RAW command data: {command_data}")
+            pub.sendMessage('log:info', msg=f"[CommandReceiver] RX from {addr}: CMD={command_type}, PARAMS={params}")
+
+            response_data = self.process_command({'CMD': command_type, 'PARAMS': params})
+
+            try:
+                response_json = json.dumps(response_data) + '\n'
+                client_socket.sendall(response_json.encode('utf-8'))
+                pub.sendMessage('log:debug', msg=f"[CommandReceiver] Response sent: {response_data}")
+            except Exception as send_err:
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending response: {send_err}")
+
+        except json.JSONDecodeError:
+            pub.sendMessage('log:error', msg=f"[CommandReceiver] Invalid JSON: {data.decode('utf-8', errors='ignore')[:100]}")
+            response = {'status': 'error', 'message': 'Invalid JSON format'}
+            try:
+                client_socket.sendall((json.dumps(response) + '\n').encode('utf-8'))
+            except Exception as send_err:
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending JSON error response: {send_err}")
+
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[CommandReceiver] Error processing command: {e}")
+            response = {'status': 'error', 'message': f"Internal error: {e}", 'command': command_type}
+            try:
+                client_socket.sendall((json.dumps(response) + '\n').encode('utf-8'))
+            except Exception as send_err:
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] Error sending error response: {send_err}")
+
+    def process_command(self, command_json):
+        """Process a command using registered handlers."""
+        command = command_json.get('CMD')
+        params = command_json.get('PARAMS', {})
+
+        if command in self.handlers:
+            try:
+                result = self.handlers[command](params)
+                return {'status': 'ok', 'message': result or f'{command} executed'}
+            except Exception as e:
+                pub.sendMessage('log:error', msg=f"Error handling command '{command}': {e}")
+                logging.exception(f"Error handling command '{command}'")
+                return {'status': 'error', 'message': str(e)}
+        else:
+            pub.sendMessage('log:warning', msg=f"Unknown command received: {command}")
+            return {'status': 'error', 'message': f'Unknown command: {command}'}
+
+    def handle_log(self, params):
+        """Handles log messages sent from the GUI."""
+        message = params.get('message', '')
+        level = params.get('level', 'info').lower()
+        log_msg = f"[GUI] {message}"
+
+        if level == 'error':
+            pub.sendMessage('log:error', msg=log_msg)
+        elif level == 'warning':
+            pub.sendMessage('log:warning', msg=log_msg)
+        else:
+            pub.sendMessage('log:info', msg=log_msg)
+        return "Log message received"
+
+    def handle_motion_event(self, params):
+        """Forwards motion event data via pubsub."""
+        pub.sendMessage('motion_event', data=params)
+        return "Motion event forwarded"
+
+    def handle_processing_mode(self, params):
+        """Forwards processing mode change via pubsub."""
+        pub.sendMessage('set_processing_mode', data=params)
+        return "Processing mode change forwarded"
+
+    def handle_send_animation(self, params):
+        """
+        Forwards animation requests via pubsub.
+        Servo animations are routed through 'animate' (compatible with
+        actuator/servo.py and animate.py); NeoPixel animations are sent
+        via 'led:animate'.
+        """
+        animation_name = params.get('animation')
+        color = params.get('color')
+        color2 = params.get('color2')
+        repeat = params.get('repeat', 1)
+
+        if not animation_name:
+            pub.sendMessage('log:error', msg="[CommandReceiver] Missing animation name.")
+            return {"status": "error", "message": "Missing animation name"}
+
+        animation_name_upper = animation_name.upper()
+
+        if animation_name_upper in PI_SERVO_ANIMATIONS:
+            # Servo animations are routed to the Animate module
+            try:
+                for _ in range(int(repeat)):
+                    pub.sendMessage('animate', action=animation_name.lower())
+                log_msg = f"Servo animation sent to Animate module: {animation_name} (repeat={repeat})"
+                pub.sendMessage('log:info', msg=log_msg)
+                return {"status": "ok", "message": log_msg}
+            except Exception as e:
+                error_msg = f"Error routing servo animation: {e}"
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] {error_msg}")
+                pub.sendMessage('log:debug', msg=traceback.format_exc())
+                return {"status": "error", "message": error_msg}
+        else:
+            # Run NeoPixel animations
+            try:
+                pub.sendMessage('led:animate',
+                                animation=animation_name,
+                                color=color,
+                                color2=color2,
+                                repeat=repeat)
+                log_msg = f"Forwarded to NeoPx module: {animation_name}"
+                pub.sendMessage('log:info', msg=log_msg)
+                return {"status": "ok", "message": log_msg}
+            except Exception as e:
+                error_msg = f"Error in NeoPx module: {e}"
+                pub.sendMessage('log:error', msg=f"[CommandReceiver] {error_msg}")
+                pub.sendMessage('log:debug', msg=traceback.format_exc())
+                return {"status": "error", "message": error_msg}
+
+    def handle_get_status(self, params):
+        pub.sendMessage('cmd:personality:get_status')
+        return "Status request forwarded"
+
+    def handle_set_state(self, params):
+        pub.sendMessage('cmd:personality:set_state', state=params.get('state'))
+        return "Set state request forwarded"
+
+    def handle_set_eye(self, params):
+        pub.sendMessage('cmd:personality:set_eye', color=params.get('color'))
+        return "Set eye color request forwarded"
+
+    def handle_servo_move(self, params):
+        pub.sendMessage('servo:move', data=params)
+        return "Servo move request forwarded"
+
+    def _cleanup_threads(self):
+        """Remove completed client threads from the list."""
+        with self._lock:
+            self._client_threads = [t for t in self._client_threads if t.is_alive()]
+
+    def stop_receiver(self):
+        """Stop the command receiver server."""
+        if not self.running:
+            return
+
+        pub.sendMessage('log:info', msg="[CommandReceiver] Stopping command receiver...")
+        self.running = False  # Signal the threads to stop
+
+        # Close the server socket (unblocks accept())
+        if self.server_socket:
+            server_socket_to_close = self.server_socket
+            self.server_socket = None  # Prevent reuse
+            try:
+                server_socket_to_close.close()
+                pub.sendMessage('log:info', msg="[CommandReceiver] Server socket closed.")
+            except Exception as e:
+                pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error closing server socket: {e}")
+
+        # Wait briefly for active client threads
+        with self._lock:
+            threads_to_join = self._client_threads[:]
+        for thread in threads_to_join:
+            try:
+                thread.join(timeout=0.5)  # Wait at most 0.5 seconds
+            except Exception as e:
+                pub.sendMessage('log:warning', msg=f"[CommandReceiver] Error joining client thread: {e}")
+
+        self._cleanup_threads()  # Final cleanup
+        pub.sendMessage('log:info', msg="[CommandReceiver] Stopped.")
+# Example usage (for testing when this file is run directly)
+if __name__ == '__main__':
+    from pubsub import pub
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+
+    # A simple log listener (prints pubsub messages)
+    class LogListener:
+        def __call__(self, msg, level='debug'):
+            print(f"[{level.upper()}] {msg}")
+        def info(self, msg): print(f"[INFO] {msg}")
+        def warning(self, msg): print(f"[WARNING] {msg}")
+        def error(self, msg): print(f"[ERROR] {msg}")
+        def debug(self, msg): print(f"[DEBUG] {msg}")
+
+    log_listener = LogListener()
+    pub.subscribe(log_listener.info, 'log:info')
+    pub.subscribe(log_listener.warning, 'log:warning')
+    pub.subscribe(log_listener.error, 'log:error')
+    pub.subscribe(log_listener.debug, 'log:debug')
+
+    # Simple listeners for the commands that other modules would handle
+    def handle_servo(servo, value, absolute):
+        print(f"[SERVO HANDLER] Received: servo={servo}, value={value}, absolute={absolute}")
+    def handle_animate(action):
+        print(f"[ANIMATE HANDLER] Received action: {action}")
+    def handle_tts(text, lang):
+        print(f"[TTS HANDLER] Received text: '{text}', lang: {lang}")
+    def handle_buzzer(song):
+        print(f"[BUZZER HANDLER] Received song: {song}")
+    def handle_personality_set(name):
+        print(f"[PERSONALITY HANDLER] Set personality: {name}")
+    def handle_pers_get_list():
+        print("[PERSONALITY HANDLER] Get list requested")
+        # A real application would publish pub.sendMessage('data:personalities', ...) here
+    def handle_pers_set_eye(color):
+        print(f"[PERSONALITY HANDLER] Set eye: {color}")
+    def handle_pers_set_state(state):
+        print(f"[PERSONALITY HANDLER] Set state: {state}")
+    def handle_pers_get_status():
+        print("[PERSONALITY HANDLER] Get status requested")
+        # A real application would publish pub.sendMessage('data:robot_status', ...) here
+
+    pub.subscribe(handle_servo, 'cmd:servo')
+    pub.subscribe(handle_animate, 'cmd:animate')
+    pub.subscribe(handle_tts, 'cmd:tts')
+    pub.subscribe(handle_buzzer, 'cmd:buzzer')
+    pub.subscribe(handle_personality_set, 'cmd:personality:set')
+    pub.subscribe(handle_pers_get_list, 'cmd:personality:get_list')
+    pub.subscribe(handle_pers_set_eye, 'cmd:personality:set_eye')
+    pub.subscribe(handle_pers_set_state, 'cmd:personality:set_state')
+    pub.subscribe(handle_pers_get_status, 'cmd:personality:get_status')
+
+    print("Starting Command Receiver for testing...")
+    receiver = CommandReceiver(port=8090, autostart=False)
+    receiver.start_receiver()  # Does not spawn a thread here; runs directly
+
+    print("Command Receiver running. Press Ctrl+C to stop.")
+    try:
+        while receiver.running:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        print("\nCtrl+C received, stopping...")
+        receiver.stop_receiver()
+    print("Test finished.")
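+
+# Wire-protocol sketch (hypothetical client, shown for reference only): each
+# command is a JSON object terminated by '\n', and the receiver answers with
+# one JSON status line.
+#
+#   import json, socket
+#   s = socket.create_connection(('<robot-ip>', 8090))
+#   cmd = {'command': 'set_eye', 'params': {'color': 'red'}}
+#   s.sendall((json.dumps(cmd) + '\n').encode('utf-8'))
+#   print(s.makefile().readline())  # e.g. {"status": "ok", "message": "..."}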
+ - gui_port: Port number the GUI listener is running on (required). + - reconnect_delay: Delay in seconds before attempting to reconnect (default: 5). + - forward_topics: List of pubsub topics to listen to and forward (required). + - autostart: Start the sender automatically (default: True). + """ + self.gui_ip = kwargs.get('gui_ip') + self.gui_port = kwargs.get('gui_port') + self.reconnect_delay = kwargs.get('reconnect_delay', 5) + self.forward_topics = kwargs.get('forward_topics', []) + self.running = False + self.connected = False + self.client_socket = None + self._send_lock = threading.Lock() # Sokete yazma iÅŸlemleri için kilit + self._thread = None + self.data_topics_to_forward = kwargs.get('forward_topics', []) + + # --- GUI IP ve Port kontrolü --- + if not self.gui_ip or not self.gui_port: + # DoÄŸrudan loglama kullan, çünkü pubsub döngüye neden olabilir + print("[GuiSender] ERROR: GUI IP address and Port are required in configuration!") + self._autostart = False + # Hata durumunda modülün yüklenmesini engellemek daha iyi olabilir + # raise ValueError("GUI IP and Port configuration missing for RobotToGuiSender") + return + else: + self._autostart = kwargs.get('autostart', True) + + # --- PubSub Abonelikleri --- + # NOT: Kendi ürettiÄŸi logları dinlememesi için log:* topiclerine abone OLMAYACAK. + # Loglar doÄŸrudan loglama sistemi tarafından ele alınmalı. + # Sadece veri topiclerine abone olmalı. + self.data_topics_to_forward = [t for t in self.forward_topics if not t.startswith('log:')] + + if self.data_topics_to_forward: + for topic in self.data_topics_to_forward: + try: + # handle_message'ı doğrudan kullanmak yerine topic'e özel handler tanımlayabiliriz + # veya handle_message içinde topic adına göre dallanabiliriz. + # Şimdilik handle_message kullanalım. + pub.subscribe(self.handle_message, topic) + print(f"[GuiSender] INFO: Subscribed to data topic: {topic}") + except Exception as e: + print(f"[GuiSender] ERROR: Failed to subscribe to topic {topic}: {e}") + else: + print("[GuiSender] WARNING: No non-log topics specified to forward in configuration.") + + # --- Exit Sinyali --- + pub.subscribe(self.stop_sender, 'exit') + + # --- Otomatik BaÅŸlatma --- + if self._autostart: + self.start_sender() + + def start_sender(self): + """Starts the sender thread which tries to connect and send data.""" + if self.running: + print("[GuiSender] INFO: Sender already running.") + return + + self.running = True + self._thread = threading.Thread(target=self._run, daemon=True) + self._thread.start() + print(f"[GuiSender] INFO: Started. Attempting to connect to GUI at {self.gui_ip}:{self.gui_port}") + + def _connect(self): + """Attempts to establish a connection to the GUI listener.""" + if self.connected: + return True + + print(f"[GuiSender] DEBUG: Attempting connection to {self.gui_ip}:{self.gui_port}...") + try: + if self.client_socket: + try: self.client_socket.close() + except: pass # Kapatma hatasını yoksay + + self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.client_socket.settimeout(5.0) + self.client_socket.connect((self.gui_ip, self.gui_port)) + self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + # DiÄŸer KeepAlive ayarları... 
+ + self.connected = True + print(f"[GuiSender] INFO: Connected successfully to GUI at {self.gui_ip}:{self.gui_port}") + # BaÄŸlantı baÅŸarılı mesajı gönder + # Bu mesaj handle_message'dan geçmeyeceÄŸi için döngüye girmez + self._send_data({'command': 'hello_from_robot', 'params': {'message': 'Robot connected'}}) + return True + except socket.timeout: + # Timeout logunu doÄŸrudan yazdır + print(f"[GuiSender] WARNING: Connection attempt timed out to {self.gui_ip}:{self.gui_port}.") + if self.client_socket: self.client_socket.close() + self.client_socket = None + self.connected = False + return False + except OSError as e: + print(f"[GuiSender] WARNING: Connection OS Error to {self.gui_ip}:{self.gui_port} - {e}") + if self.client_socket: self.client_socket.close() + self.client_socket = None + self.connected = False + return False + except Exception as e: + print(f"[GuiSender] ERROR: Unexpected error connecting to GUI: {e}") + print(f"[GuiSender] DEBUG: Traceback: {traceback.format_exc()}") + if self.client_socket: self.client_socket.close() + self.client_socket = None + self.connected = False + return False + + def _run(self): + """Main loop for the sender thread. Tries to stay connected.""" + while self.running: + if not self.connected: + if not self._connect(): + if self.running: + print(f"[GuiSender] INFO: Connection failed. Retrying in {self.reconnect_delay} seconds...") + time.sleep(self.reconnect_delay) + continue + + # BaÄŸlantı varsa, baÄŸlantının kopup kopmadığını kontrol et + try: + time.sleep(1) # Sürekli CPU kullanmamak için kısa bekleme + # Keepalive kontrolü (isteÄŸe baÄŸlı, daha sık yapılabilir) + if self.running and self.connected and self.client_socket: + # Her 5 saniyede bir boÅŸ byte göndererek test et + if int(time.time()) % 5 == 0: + with self._send_lock: + if self.client_socket: # Tekrar kontrol + self.client_socket.send(b'') + except (OSError, BrokenPipeError): + # Keepalive sırasında baÄŸlantı kopmuÅŸ + print("[GuiSender] WARNING: Connection lost (detected by keepalive). Will reconnect.") + self.connected = False + if self.client_socket: + try: self.client_socket.close() + except: pass + self.client_socket = None + except Exception as e: + print(f"[GuiSender] ERROR: Error during keepalive/wait loop: {e}") + self.connected = False + if self.client_socket: + try: self.client_socket.close() + except: pass + self.client_socket = None + + + # Döngü bittiÄŸinde temizlik + if self.client_socket: + print("[GuiSender] INFO: Closing connection to GUI.") + try: + with self._send_lock: + socket_to_close = self.client_socket + self.client_socket = None + self.connected = False + if socket_to_close: socket_to_close.close() + except Exception as e: + print(f"[GuiSender] WARNING: Error closing socket: {e}") + self.connected = False + print("[GuiSender] INFO: Run loop finished.") + + + def handle_message(self, topic=pub.AUTO_TOPIC, status=None, personalities=None, **kwargs): + """ + Callback function for subscribed DATA pubsub topics. + Explicitly defines expected arguments and uses **kwargs for others. 
+ """ + if not self.running or not self.connected: + return + + topic_name = topic.getName() + data_to_send = None + + if topic_name == 'data:robot_status': + # 'status' artık doğrudan bir argüman, kwargs'dan almaya gerek yok + if status: + # Değişiklik tespiti: Önceki durum ile karşılaştır + if hasattr(self, '_last_status'): + # Eğer durum, göz rengi ve kişilik aynıysa göndermeden çık + if (self._last_status.get('state') == status.get('state') and + self._last_status.get('eye_color') == status.get('eye_color')): + return # Değişiklik yoksa gönderme + + # Durumu kaydet ve değişiklikleri gönder + self._last_status = status.copy() # Kopyalayarak kaydet + data_to_send = { + 'command': 'update_status', + 'params': status + } + else: + print(f"[GuiSender] WARNING: Received 'data:robot_status' but 'status' argument was None.") + + # Diğer 'data:' topicleri (varsa ve forward_topics içindeyse) kwargs üzerinden işlenebilir + elif topic_name in self.data_topics_to_forward: + print(f"[GuiSender] WARNING: Received subscribed data topic '{topic_name}' - passing kwargs.") + # Genel formatta gönder (kwargs içindeki *diğer* veriler varsa) + data_to_send = { + 'command': 'generic_robot_event', + 'params': {'topic': topic_name, 'data': kwargs} # Sadece kalan kwargs'ı gönder + } + + if data_to_send: + self._send_data(data_to_send) + + def _send_data(self, data_dict): + """Sends the formatted data dictionary to the GUI as JSON.""" + # Bu fonksiyon artık sadece baÄŸlıysa çaÄŸrılmalı (handle_message kontrol ediyor) + # Ama yine de çift kontrol yapalım + if not self.connected or not self.client_socket: + # Bu mesajı loglamak yerine doÄŸrudan yazdırmak daha güvenli + # print("[GuiSender] WARNING: _send_data called while not connected (should not happen often).") + return False + + try: + json_data = json.dumps(data_dict) + '\n' + encoded_data = json_data.encode('utf-8') + + with self._send_lock: + # Kilidi aldıktan sonra tekrar kontrol et + if not self.connected or not self.client_socket: return False + self.client_socket.sendall(encoded_data) + # BaÅŸarılı gönderimi debug olarak logla (pubsub kullanma!) + # print(f"[GuiSender] DEBUG: Sent data: {data_dict.get('command', 'N/A')} ({len(encoded_data)} bytes)") + return True + + except (BrokenPipeError, ConnectionResetError, OSError) as e: + # BaÄŸlantı hatası logunu doÄŸrudan yazdır + print(f"[GuiSender] WARNING: Connection error while sending data: {e}. Marking as disconnected.") + self.connected = False + if self.client_socket: + try: self.client_socket.close() + except: pass + self.client_socket = None + # BaÅŸarısız gönderimi debug olarak logla (pubsub kullanma!) 
+ # print(f"[GuiSender] DEBUG: Failed to send: {data_dict}") + return False + except Exception as e: + print(f"[GuiSender] ERROR: Unexpected error sending data: {e}") + print(f"[GuiSender] DEBUG: Traceback: {traceback.format_exc()}") + self.connected = False # Hata durumunda baÄŸlantıyı kesik say + if self.client_socket: + try: self.client_socket.close() + except: pass + self.client_socket = None + return False + + def stop_sender(self): + """Stops the sender thread and closes the connection.""" + if not self.running: + return + + print("[GuiSender] INFO: Stopping sender...") + self.running = False + + # Thread'in bitmesini bekle + if self._thread and self._thread.is_alive(): + try: + self._thread.join(timeout=1.0) + if self._thread.is_alive(): + print("[GuiSender] WARNING: Sender thread did not terminate gracefully.") + except Exception as e: + print(f"[GuiSender] WARNING: Error joining sender thread: {e}") + + # Soketi son kez kapatmayı dene + if self.client_socket: + try: + with self._send_lock: + socket_to_close = self.client_socket + self.client_socket = None + self.connected = False + if socket_to_close: socket_to_close.close() + except Exception as e: + print(f"[GuiSender] WARNING: Error during final socket close: {e}") + + print("[GuiSender] INFO: Stopped.") + +# Örnek Kullanım (Test için - DeÄŸiÅŸtirilmedi) +if __name__ == '__main__': + import logging + # Loglamayı DEBUG seviyesine ayarlayalım ki print mesajları görünsün + # logging.basicConfig(level=logging.INFO) # INFO yeterli olabilir + print("--- RobotToGuiSender Test BaÅŸlatılıyor ---") + + # Basit bir TCP sunucusu (GUI listener'ı simüle etmek için) + def gui_listener_simulator(port): + print(f"[SIMULATOR] GUI listener simulator baÅŸlatılıyor (Port: {port})...") + server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + server_socket.bind(('0.0.0.0', port)) + server_socket.listen(1) + while True: + print("[SIMULATOR] Robot baÄŸlantısı bekleniyor...") + client_socket, addr = server_socket.accept() + print(f"[SIMULATOR] Robot baÄŸlandı: {addr}") + try: + buffer = b"" + separator = b'\n' + while True: + chunk = client_socket.recv(1024) + if not chunk: + print("[SIMULATOR] Robot baÄŸlantısı kapandı.") + break + buffer += chunk + while separator in buffer: + line, buffer = buffer.split(separator, 1) + if line: + print(f"[SIMULATOR] Robot'tan Alındı: {line.decode('utf-8').strip()}") + except ConnectionResetError: + print("[SIMULATOR] Robot baÄŸlantısı sıfırlandı.") + except Exception as e: + print(f"[SIMULATOR] Veri alım hatası: {e}") + finally: + try: client_socket.close() + except: pass + print("[SIMULATOR] Robot soketi kapatıldı.") + except Exception as e: + print(f"[SIMULATOR] Listener hatası: {e}") + finally: + try: server_socket.close() + except: pass + print("[SIMULATOR] Listener durduruldu.") + + gui_port = 8091 + simulator_thread = threading.Thread(target=gui_listener_simulator, args=(gui_port,), daemon=True) + simulator_thread.start() + time.sleep(1) # Simülatörün baÅŸlaması için bekle + + print("RobotToGuiSender test için baÅŸlatılıyor...") + gui_ip = "127.0.0.1" # veya GUI'nin çalıştığı IP + # SADECE veri topiclerini ilet: + sender = RobotToGuiSender( + gui_ip=gui_ip, + gui_port=gui_port, + forward_topics=['data:robot_status', 'data:other_event'], # Sadece data topicleri + autostart=False + ) + sender.start_sender() + + # Pubsub logları için ayrı bir handler (GuiSender'dan bağımsız) + class PrintLogListener: + def __call__(self, msg, 
+    # A separate handler for pubsub log messages (independent of GuiSender)
+    class PrintLogListener:
+        def __call__(self, msg, level='debug', **kwargs):
+            print(f"[PUBSUB LOG - {level.upper()}] {msg}")
+    log_listener = PrintLogListener()
+    pub.subscribe(log_listener, pub.ALL_TOPICS)  # Catch all topics
+
+    print("Sender is running. Publishing test messages...")
+    try:
+        count = 0
+        while True:
+            count += 1
+            # Publish LOG messages (GuiSender must NOT forward these; only PrintLogListener should print them)
+            pub.sendMessage('log:info', msg=f"This is an info message #{count}")
+            if count % 3 == 0:
+                pub.sendMessage('log:error', msg=f"This is an ERROR message #{count//3}")
+
+            # Publish DATA messages (GuiSender SHOULD forward these)
+            if count % 5 == 0:
+                status = {'state': f'STATE_{count%4}', 'eye_color': ['blue','red','green'][count%3], 'timestamp': time.time()}
+                pub.sendMessage('data:robot_status', status=status)
+                print(f"*** 'data:robot_status' published (count: {count}) ***")
+            if count % 7 == 0:
+                pub.sendMessage('data:other_event', data=f"Other data {count}")
+                print(f"*** 'data:other_event' published (count: {count}) ***")
+
+            time.sleep(2)
+    except KeyboardInterrupt:
+        print("\nCtrl+C received, stopping sender...")
+        sender.stop_sender()
+
+    # No need to wait for the simulator thread (it is a daemon, so it exits automatically)
+    print("Test finished.")
diff --git a/modules/vision/remote/tracking.py b/modules/vision/remote/tracking.py
new file mode 100644
index 00000000..bfaa6efe
--- /dev/null
+++ b/modules/vision/remote/tracking.py
@@ -0,0 +1,62 @@
+from pubsub import pub
+from threading import Thread
+
+class Tracking:
+    TRACKING_THRESHOLD = 30
+    TRACKING_MOVE_PERCENT = 10
+
+    def __init__(self, vision, **kwargs):
+        self.vision = vision
+        self.active = kwargs.get('active', False)
+        self.use_thread = kwargs.get('thread', False)
+        if not self.use_thread:
+            pub.subscribe(self.loop, 'loop')
+        pub.subscribe(self.set_state, 'rest', active=True)
+        pub.subscribe(self.set_state, 'wake', active=True)
+        pub.subscribe(self.set_state, 'sleep', active=False)
+        pub.subscribe(self.set_state, 'exit', active=False)
+
+    def set_state(self, active):
+        self.active = active
+        if active and self.use_thread:
+            Thread(target=self.loop_thread, args=()).start()
+
+    def loop_thread(self):
+        while self.active:
+            self.track_largest_match()
+
+    def loop(self):
+        if not self.active:
+            return
+        self.track_largest_match()
+
+    def track_largest_match(self):
+        largest = self._largest(self.vision.detect())
+        dimensions = getattr(self.vision, 'dimensions', (640, 480))
+        pub.sendMessage('tracking:match', largest=largest, screen=dimensions)
+        if largest is None:
+            return False
+        (x, y, w, h) = largest
+        x_move = Tracking.calc_move_amount(dimensions[0], x, w)
+        y_move = Tracking.calc_move_amount(dimensions[1], y, h)  # use the frame height for the vertical axis
+        if x_move:
+            pub.sendMessage('servo:pan:mv', percentage=x_move)
+        if y_move:
+            pub.sendMessage('servo:tilt:mv', percentage=-y_move)
+        return True
+
+    @staticmethod
+    def calc_move_amount(screen_w, target_pos, target_w):
+        screen_c = screen_w / 2
+        target_c = target_pos + (target_w / 2)
+        if abs(screen_c - target_c) > Tracking.TRACKING_THRESHOLD:
+            return round((screen_c - target_c) / Tracking.TRACKING_MOVE_PERCENT)
+        return 0
+
+    def _largest(self, matches):
+        largest = None
+        if matches:
+            for match in matches:
+                if largest is None or self.vision.get_area(match) > self.vision.get_area(largest):
+                    largest = match
+            return largest
+        return None
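+
+# A worked example of the proportional move above (comment only, illustrative):
+# with a 640px-wide frame, a face at x=400 with w=80 has centre 440; the screen
+# centre is 320, so the offset is 320 - 440 = -120. That exceeds
+# TRACKING_THRESHOLD (30), so calc_move_amount returns round(-120 / 10) = -12,
+# i.e. a 12% pan toward the face. Offsets within 30px of centre return 0,
+# a dead zone that keeps the servos from hunting around a centred target.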
diff --git a/modules/vision/remote/train_model.py b/modules/vision/remote/train_model.py
new file mode 100644
index 00000000..542e0263
--- /dev/null
+++ b/modules/vision/remote/train_model.py
@@ -0,0 +1,250 @@
+from imutils import paths
+import face_recognition
+import pickle
+import cv2
+import os
+import shutil
+from pubsub import pub
+import time
+import json
+import numpy as np
+
+class TrainModel:
+    def __init__(self, **kwargs):
+        self.path = kwargs.get('dataset', 'dataset')
+        self.output = kwargs.get('output', 'encodings.pickle')
+        self.model = kwargs.get('detection_method', 'hog')  # 'hog' or 'cnn'
+        self.jitters = kwargs.get('jitters', 1)  # Higher values are more accurate but slower
+        self.progress_callback = kwargs.get('progress_callback', None)
+
+    def ensure_dataset_exists(self):
+        """Check that the dataset directory exists and create it if not"""
+        if not os.path.exists(self.path):
+            os.makedirs(self.path)
+            pub.sendMessage('log', msg=f'[TrainModel] Created dataset directory: {self.path}')
+
+    def train(self):
+        """Train the face recognition model"""
+        self.ensure_dataset_exists()
+
+        # Find the image files in the dataset
+        imagePaths = list(paths.list_images(self.path))
+        if len(imagePaths) < 1:
+            pub.sendMessage('log', msg='[TrainModel] Nothing to process - no images found in dataset')
+            return 0
+
+        pub.sendMessage('log', msg=f'[TrainModel] Start processing {len(imagePaths)} images...')
+        knownEncodings = []
+        knownNames = []
+        success_count = 0
+        fail_count = 0
+
+        # Process each image
+        for (i, imagePath) in enumerate(imagePaths):
+            if '.AppleDouble' in imagePath:
+                continue
+
+            # Progress update
+            if self.progress_callback:
+                progress = int((i / len(imagePaths)) * 100)
+                self.progress_callback(progress, imagePath)
+
+            pub.sendMessage('log', msg=f'[TrainModel] Processing image {i + 1}/{len(imagePaths)} - {imagePath}')
+
+            # Extract the person's name from the file path (dataset/person_name/image.jpg format)
+            name = imagePath.split(os.path.sep)[-2]
+
+            # Read and process the image
+            image = cv2.imread(imagePath)
+            if image is None:
+                pub.sendMessage('log', msg=f'[TrainModel] Warning: Could not read image {imagePath}')
+                fail_count += 1
+                continue
+
+            # Convert from OpenCV's BGR format to RGB (for the face_recognition library)
+            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+            # Detect faces - we use face_recognition.face_locations directly
+            # rather than a cascade, for more accurate results
+            try:
+                # Detect the faces directly with face_recognition
+                boxes = face_recognition.face_locations(rgb, model=self.model)
+
+                if len(boxes) == 0:
+                    pub.sendMessage('log', msg=f'[TrainModel] Warning: No face detected in {imagePath}')
+                    fail_count += 1
+                    continue
+
+                # Extract encodings for the detected faces
+                encodings = face_recognition.face_encodings(rgb, boxes, num_jitters=self.jitters)
+
+                # Check for valid encodings and filter out invalid ones
+                valid_encodings = []
+                for encoding in encodings:
+                    # Check for NaN values which cause comparison issues
+                    if np.isnan(encoding).any():
+                        pub.sendMessage('log', msg=f'[TrainModel] Warning: NaN values in encoding for {imagePath}')
+                        continue
+
+                    # Check encoding dimension
+                    if len(encoding) != 128:
+                        pub.sendMessage('log', msg=f'[TrainModel] Warning: Incorrect encoding dimension for {imagePath}')
+                        continue
+
+                    # Check for reasonable value ranges (-1 to 1)
+                    if np.min(encoding) < -1.1 or np.max(encoding) > 1.1:
+                        pub.sendMessage('log', msg=f'[TrainModel] Warning: Encoding values out of range for {imagePath}')
+                        continue
+
+                    valid_encodings.append(encoding)
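+
+                # (A note on the checks above: face_recognition wraps dlib's face
+                # embedding model, which produces 128-dimensional vectors, hence
+                # the len(encoding) != 128 test; the [-1.1, 1.1] window is a loose
+                # sanity bound on typical embedding values, not a hard library limit.)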
+                if not valid_encodings:
+                    pub.sendMessage('log', msg=f'[TrainModel] Warning: No valid encodings for {imagePath}')
+                    fail_count += 1
+                    continue
+
+                # Add all valid encodings to our lists
+                for encoding in valid_encodings:
+                    knownEncodings.append(encoding)
+                    knownNames.append(name)
+                success_count += 1
+
+            except Exception as e:
+                pub.sendMessage('log', msg=f'[TrainModel] Error processing {imagePath}: {e}')
+                fail_count += 1
+                continue
+
+        # If no encodings were produced at all, warn and stop
+        if len(knownEncodings) == 0:
+            pub.sendMessage('log', msg='[TrainModel] No faces were detected in any images')
+            return 0
+
+        # Save the data in pickle format
+        pub.sendMessage('log', msg=f'[TrainModel] Serializing {len(knownEncodings)} encodings...')
+        data = {"encodings": knownEncodings, "names": knownNames}
+
+        # Save to a temporary file first and move it over the real file only on
+        # success (to avoid corrupting the existing model)
+        temp_output = f"{self.output}.tmp"
+        with open(temp_output, "wb") as f:
+            f.write(pickle.dumps(data))
+
+        # If saved successfully, move it to the real file name
+        if os.path.exists(temp_output):
+            if os.path.exists(self.output):
+                # Back up the old model
+                backup = f"{self.output}.bak"
+                try:
+                    shutil.copy2(self.output, backup)
+                except Exception as e:
+                    pub.sendMessage('log', msg=f'[TrainModel] Warning: Could not backup old model: {e}')
+
+            # Move the new file into place
+            shutil.move(temp_output, self.output)
+            pub.sendMessage('log', msg=f'[TrainModel] Saved encodings to {self.output}')
+
+        # Report how many distinct people the model was trained for
+        unique_names = set(knownNames)
+        pub.sendMessage('log', msg=f'[TrainModel] Model contains {len(unique_names)} unique persons: {", ".join(unique_names)}')
+
+        return len(knownEncodings)
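+
+    # How the serialized model is typically consumed (a sketch only, not part of
+    # this trainer; 'rgb_frame' stands for any RGB image array):
+    #
+    #   with open('encodings.pickle', 'rb') as f:
+    #       data = pickle.load(f)
+    #   probe = face_recognition.face_encodings(rgb_frame)[0]
+    #   hits = face_recognition.compare_faces(data['encodings'], probe, tolerance=0.6)
+    #   names = [n for n, hit in zip(data['names'], hits) if hit]
+    #
+    # tolerance=0.6 is the face_recognition default; lower values are stricter.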
+    def add_faces_from_webcam(self, person_name, num_images=5, camera_id=0):
+        """Collect face samples for a person from the webcam"""
+        if not person_name or person_name.strip() == "":
+            pub.sendMessage('log', msg='[TrainModel] Error: Person name cannot be empty')
+            return False
+
+        # Check for invalid characters in the name and strip them out
+        person_name = "".join(c for c in person_name if c.isalnum() or c in "_ ").strip()
+        if not person_name:
+            pub.sendMessage('log', msg='[TrainModel] Error: Person name contains only invalid characters')
+            return False
+
+        # Make sure the dataset directory exists
+        self.ensure_dataset_exists()
+
+        # Create a directory for the person
+        person_dir = os.path.join(self.path, person_name)
+        if not os.path.exists(person_dir):
+            os.makedirs(person_dir)
+
+        # Open the camera
+        pub.sendMessage('log', msg=f'[TrainModel] Opening camera for {person_name}...')
+        cap = cv2.VideoCapture(camera_id)
+        if not cap.isOpened():
+            pub.sendMessage('log', msg='[TrainModel] Error: Could not open camera')
+            return False
+
+        # Set the frame size
+        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
+        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
+
+        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+        images_captured = 0
+        delay_frames = 5  # Capture an image every 5 frames (for variety between samples)
+        frame_count = 0
+
+        pub.sendMessage('log', msg=f'[TrainModel] Ready to capture {num_images} images for {person_name}')
+
+        while images_captured < num_images:
+            ret, frame = cap.read()
+            if not ret:
+                pub.sendMessage('log', msg='[TrainModel] Error reading from camera')
+                break
+
+            # Mirror the image (more natural for the user)
+            frame = cv2.flip(frame, 1)
+
+            # Only run image processing every few frames instead of every frame
+            frame_count += 1
+            if frame_count % delay_frames != 0:
+                # Add the status text
+                text = f"Capturing for: {person_name} ({images_captured}/{num_images})"
+                cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+                cv2.imshow('Capture Face', frame)
+                if cv2.waitKey(1) & 0xFF == ord('q'):
+                    break
+                continue
+
+            # Face detection
+            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+
+            # Draw the bounding boxes and show the frame
+            for (x, y, w, h) in faces:
+                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
+
+            text = f"Capturing for: {person_name} ({images_captured}/{num_images})"
+            cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+            cv2.imshow('Capture Face', frame)
+            key = cv2.waitKey(1) & 0xFF
+
+            if key == ord('q'):
+                break
+            elif key == ord('c') or key == ord(' '):  # capture with 'c' or the space key
+                # If a face was detected
+                if len(faces) > 0:
+                    # Select the largest face (if there is more than one)
+                    largest_face = max(faces, key=lambda rect: rect[2] * rect[3])
+
+                    # Save the image
+                    timestamp = int(time.time() * 1000)
+                    filename = os.path.join(person_dir, f"{timestamp}.jpg")
+                    cv2.imwrite(filename, frame)
+
+                    images_captured += 1
+                    pub.sendMessage('log', msg=f'[TrainModel] Captured image {images_captured}/{num_images}')
+
+                    # Wait a moment
+                    time.sleep(0.5)
+                else:
+                    pub.sendMessage('log', msg='[TrainModel] No face detected, try again')
+
+        # Clean up and close
+        cap.release()
+        cv2.destroyAllWindows()
+
+        pub.sendMessage('log', msg=f'[TrainModel] Captured {images_captured} images for {person_name}')
+        return images_captured > 0
diff --git a/modules/vision/remote/video_stream.py b/modules/vision/remote/video_stream.py
new file mode 100644
index 00000000..3966eb9e
--- /dev/null
+++ b/modules/vision/remote/video_stream.py
@@ -0,0 +1,282 @@
+import cv2
+from threading import Thread
+from pubsub import pub
+import platform
+import os
+import time
+import numpy as np
+
+class VideoStream:
+    def __init__(self, source_type="webcam", **kwargs):
+        # Configure environment variables for camera backends
+        if 'arm' in platform.machine():
+            os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0"
+            os.environ["OPENCV_VIDEOIO_PRIORITY_V4L"] = "1"  # Use V4L directly when not using GStreamer
+            os.environ["OPENCV_VIDEOIO_PRIORITY_V4L2"] = "1"
+
+            # Enable GStreamer - important for libcamerasrc
+            os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "1"
+
+        self.source_type = source_type
+        self.stopped = False
+        self.frame = None
+        self.grabbed = False
+        self.resolution = kwargs.get('resolution', (640, 480))
+
+        # Create a blank frame in case initialization fails
+        self.frame = np.zeros((self.resolution[1], self.resolution[0], 3), dtype=np.uint8)
+        font = cv2.FONT_HERSHEY_SIMPLEX
+        cv2.putText(self.frame, "No Camera Feed", (50, self.resolution[1]//2),
+                    font, 1, (255, 255, 255), 2, cv2.LINE_AA)
+
+        # Subscribe to module events
+        pub.subscribe(self.stop, 'exit')
+        pub.subscribe(self.update_pipeline, 'camera:set_pipeline')
+
+        # Initialize camera based on source type
+        self._initialize_camera(kwargs)
+
+    def _initialize_camera(self, kwargs):
+        """Initialize the camera based on source type and parameters"""
+        if self.source_type == "remote":
+            # Connect to remote stream
+            self.remote_source = kwargs.get('remote_source', 'http://localhost:8000/stream.mjpg')
+            pub.sendMessage('log', msg=f"[VideoStream] Connecting to remote stream: {self.remote_source}")
+
+            # Set OpenCV parameters for better MJPEG streaming reliability
+            os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
+
+            self.stream = cv2.VideoCapture(self.remote_source)
+
+            # Configure stream parameters
+            self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 3) # 
Use a small buffer to reduce latency + + for retry in range(3): + self.grabbed, frame = self.stream.read() + if self.grabbed and frame is not None: + self.frame = frame + pub.sendMessage('log', msg=f"[VideoStream] Successfully connected to remote stream (attempt {retry+1})") + break + else: + pub.sendMessage('log', msg=f"[VideoStream] Failed to connect to remote stream (attempt {retry+1})") + time.sleep(1) + + if not self.grabbed: + pub.sendMessage('log', msg="[VideoStream] All connection attempts to remote stream failed") + elif self.source_type == "gstreamer": + # Add GStreamer pipeline support + gst_pipeline = kwargs.get('gst_pipeline', '') + + # If no pipeline specified, create one using libcamerasrc + if not gst_pipeline: + width, height = self.resolution + framerate = kwargs.get('framerate', 30) + gst_pipeline = ( + f"libcamerasrc ! video/x-raw, width={width}, height={height}, " + f"framerate={framerate}/1 ! videoconvert ! appsink" + ) + + pub.sendMessage('log', msg=f"[VideoStream] Using GStreamer pipeline: {gst_pipeline}") + self.stream = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER) + + if self.stream.isOpened(): + pub.sendMessage('log', msg="[VideoStream] GStreamer pipeline opened successfully") + self.grabbed, frame = self.stream.read() + if self.grabbed and frame is not None: + self.frame = frame + pub.sendMessage('log', msg="[VideoStream] First frame captured from GStreamer") + else: + pub.sendMessage('log', msg="[VideoStream] Failed to read frame from GStreamer") + # Fall back to another method + self.source_type = "libcamera" + self.stream.release() + self._initialize_libcamera(kwargs) + else: + pub.sendMessage('log', msg="[VideoStream] Failed to open GStreamer pipeline") + # Fall back to another method + self.source_type = "libcamera" + self._initialize_libcamera(kwargs) + elif self.source_type == "libcamera": + self._initialize_libcamera(kwargs) + elif self.source_type == "picamera": + self._initialize_picamera(kwargs) + else: + # Fall back to OpenCV + self._initialize_opencv(kwargs) + + def _initialize_libcamera(self, kwargs): + """Initialize using libcamera""" + try: + import pylibcamera + from pylibcamera.capture import LibcameraStill + from pylibcamera import formats + + self.libcam = LibcameraStill( + config={ + 'width': self.resolution[0], + 'height': self.resolution[1], + 'format': formats.RGB888, + 'buffer_count': 2, # Double buffering for smooth capture + } + ) + + # Start the camera + self.libcam.start() + + # Get initial frame + raw_frame = self.libcam.capture_array() + self.frame = cv2.cvtColor(raw_frame, cv2.COLOR_RGB2BGR) + self.grabbed = True + + pub.sendMessage('log', msg="[VideoStream] libcamera initialized successfully") + except ImportError as e: + pub.sendMessage('log', msg=f"[VideoStream] pylibcamera direct usage failed: {str(e)}, falling back to picamera2") + # Fall back to picamera2 which also uses libcamera + self.source_type = "picamera" + self._initialize_picamera(kwargs) + + def _initialize_picamera(self, kwargs): + """Initialize using picamera2""" + try: + from picamera2 import Picamera2 + + self.picam2 = Picamera2() + + # Use simpler configuration to avoid control errors + config = self.picam2.create_preview_configuration( + main={"size": self.resolution} + ) + + self.picam2.configure(config) + self.picam2.start() + + # Get initial frame + self.frame = self.picam2.capture_array() + if len(self.frame.shape) > 2 and self.frame.shape[2] == 3: + self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR) + self.grabbed = True + + 
pub.sendMessage('log', msg="[VideoStream] PiCamera2 initialized successfully") + except Exception as e: + pub.sendMessage('log', msg=f"[VideoStream] PiCamera2 module error: {str(e)}, falling back to OpenCV") + # Try using OpenCV with V4L2 directly instead of GStreamer + self.source_type = "opencv" + self._initialize_opencv(kwargs) + + def _initialize_opencv(self, kwargs): + """Initialize using OpenCV""" + # Try different camera devices and APIs on Raspberry Pi + if 'arm' in platform.machine(): + camera_devices = [ + ('/dev/video0', cv2.CAP_V4L2), + ('/dev/video0', cv2.CAP_ANY), + (0, cv2.CAP_V4L2), + (0, cv2.CAP_ANY) + ] + + for device, api in camera_devices: + pub.sendMessage('log', msg=f"[VideoStream] Trying camera device: {device} with API: {api}") + try: + self.stream = cv2.VideoCapture(device, api) + if self.stream.isOpened(): + self.grabbed, frame = self.stream.read() + if self.grabbed and frame is not None: + self.frame = frame + pub.sendMessage('log', msg=f"[VideoStream] Successfully opened camera with device: {device}, API: {api}") + break + else: + self.stream.release() + except Exception as e: + pub.sendMessage('log', msg=f"[VideoStream] Failed with device {device}: {str(e)}") + else: + camera_id = kwargs.get('camera_id', 0) + self.stream = cv2.VideoCapture(camera_id) + if self.stream.isOpened(): + self.grabbed, frame = self.stream.read() + if self.grabbed and frame is not None: + self.frame = frame + + # Set resolution for OpenCV cameras + if hasattr(self, 'stream') and self.stream.isOpened(): + self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.resolution[0]) + self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.resolution[1]) + + def update_pipeline(self, pipeline): + """Update the camera pipeline with a new one (for dynamic updates)""" + if self.source_type != "gstreamer": + pub.sendMessage('log', msg=f"[VideoStream] Cannot update pipeline for {self.source_type} camera") + return + + pub.sendMessage('log', msg=f"[VideoStream] Updating GStreamer pipeline to: {pipeline}") + + # Stop current stream + if hasattr(self, 'stream') and self.stream is not None: + self.stream.release() + + # Initialize new stream with the pipeline + self.stream = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER) + + if self.stream.isOpened(): + self.grabbed, frame = self.stream.read() + if self.grabbed and frame is not None: + self.frame = frame + pub.sendMessage('log', msg="[VideoStream] Pipeline updated successfully") + else: + pub.sendMessage('log', msg="[VideoStream] Failed to read frame from updated pipeline") + else: + pub.sendMessage('log', msg="[VideoStream] Failed to open updated pipeline") + + def start(self): + Thread(target=self.update, args=()).start() + return self + + def update(self): + while True: + if self.stopped: + self._release_resources() + return + + try: + if self.source_type == "libcamera" and hasattr(self, 'libcam'): + # Capture using direct libcamera + raw_frame = self.libcam.capture_array() + self.frame = cv2.cvtColor(raw_frame, cv2.COLOR_RGB2BGR) + self.grabbed = True + elif self.source_type == "picamera" and hasattr(self, 'picam2'): + self.frame = self.picam2.capture_array() + # Convert to BGR if needed (picamera2 returns RGB by default) + if len(self.frame.shape) > 2 and self.frame.shape[2] == 3: + self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR) + self.grabbed = True + else: + self.grabbed, self.frame = self.stream.read() + + # If frame reading failed, wait a moment and try again + if not self.grabbed or self.frame is None: + time.sleep(0.1) + except Exception as e: + 
pub.sendMessage('log', msg=f"[VideoStream] Error capturing frame: {str(e)}") + time.sleep(0.5) # Wait before trying again + + def read(self): + return self.frame + + def stop(self): + self.stopped = True + + def _release_resources(self): + if self.source_type == "libcamera" and hasattr(self, 'libcam'): + try: + self.libcam.stop() + except: + pass + elif self.source_type == "picamera" and hasattr(self, 'picam2'): + try: + self.picam2.stop() + except: + pass + elif hasattr(self, 'stream'): + try: + self.stream.release() + except: + pass diff --git a/modules/vision/remote/vision.py b/modules/vision/remote/vision.py new file mode 100644 index 00000000..61b51d80 --- /dev/null +++ b/modules/vision/remote/vision.py @@ -0,0 +1,293 @@ +import cv2 +from datetime import datetime +from imutils.video import FPS +from pubsub import pub +import platform +import os + +try: + from modules.vision.remote.faces import Faces +except ModuleNotFoundError as e: + from faces import Faces +from modules.vision.remote.video_stream import VideoStream + +class RemoteVision: + MODE_MOTION = 0 + MODE_FACES = 1 + + def __init__(self, **kwargs): + # Configure environment variables for camera backends + if 'arm' in platform.machine(): + os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0" + os.environ["OPENCV_VIDEOIO_PRIORITY_V4L"] = "1" # Use V4L directly + os.environ["OPENCV_VIDEOIO_PRIORITY_V4L2"] = "1" + + # Enable GStreamer - important for libcamerasrc + use_gstreamer = kwargs.get('enable_gstreamer', True) + if use_gstreamer: + os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "1" + pub.sendMessage('log', msg="[RemoteVision] GStreamer support enabled") + else: + os.environ["OPENCV_VIDEOIO_PRIORITY_GSTREAMER"] = "0" + pub.sendMessage('log', msg="[RemoteVision] GStreamer support disabled") + + self.is_raspberry_pi = 'arm' in platform.machine() + self.dimensions = kwargs.get('resolution', (640, 480)) + + # For Raspberry Pi: just capture and stream, don't process + if self.is_raspberry_pi: + # We'll only set up the camera source here - streaming is handled by camera_streamer + pub.sendMessage('log', msg="[RemoteVision] Setting up camera on Raspberry Pi for streaming") + + # Use a more robust method to detect available camera types + camera_type = kwargs.get('camera_type', 'auto') + + # Check if a custom GStreamer pipeline is provided + gst_pipeline = kwargs.get('gst_pipeline', None) + if gst_pipeline: + camera_type = 'gstreamer' + pub.sendMessage('log', msg=f"[RemoteVision] Using custom GStreamer pipeline: {gst_pipeline}") + + if camera_type == 'auto': + # First try GStreamer if available + try: + test_pipeline = "videotestsrc num-buffers=1 ! appsink" + test_cap = cv2.VideoCapture(test_pipeline, cv2.CAP_GSTREAMER) + if test_cap.isOpened(): + ret = test_cap.read() + test_cap.release() + if ret[0]: + camera_type = 'gstreamer' + # Create a default libcamerasrc pipeline if none provided + if not gst_pipeline: + # Use the confirmed working pipeline with BGR format + gst_pipeline = f"libcamerasrc ! video/x-raw, format=BGR, width={self.dimensions[0]}, height={self.dimensions[1]}, framerate=30/1 ! videoconvert ! 
appsink" + pub.sendMessage('log', msg=f"[RemoteVision] Using default GStreamer pipeline: {gst_pipeline}") + except Exception as e: + pub.sendMessage('log', msg=f"[RemoteVision] GStreamer test failed: {str(e)}") + # Try picamera next + try: + from picamera2 import Picamera2 + picam = Picamera2() + # Just verify it can be imported and instantiated + camera_type = 'picamera' + del picam + except ImportError: + pub.sendMessage('log', msg="[RemoteVision] PiCamera2 not available, falling back to OpenCV") + camera_type = 'opencv' + except Exception as e: + pub.sendMessage('log', msg=f"[RemoteVision] PiCamera2 error: {str(e)}, falling back to OpenCV") + camera_type = 'opencv' + + # Only try libcamera if explicitly requested in config + if kwargs.get('force_libcamera', False): + try: + import libcamera + camera_type = 'libcamera' + except ImportError: + pub.sendMessage('log', msg="[RemoteVision] Libcamera not available") + + pub.sendMessage('log', msg=f"[RemoteVision] Using {camera_type} on RPi") + + # Pass additional parameters to VideoStream + video_params = kwargs.copy() + if gst_pipeline: + video_params['gst_pipeline'] = gst_pipeline + + # Subscribe to camera pipeline updates from test tools + pub.subscribe(self.update_camera_pipeline, 'camera:set_pipeline') + + self.video = VideoStream( + source_type=camera_type, + resolution=self.dimensions, + **video_params + ).start() + else: + # UPDATED: Use the correct URL format for MJPEG streaming + self.remote_source = kwargs.get('remote_source', f'http://{kwargs.get("robot_ip", "192.168.137.52")}:8000/stream.mjpg') + self.command_endpoint = kwargs.get('command_endpoint', f'http://{kwargs.get("robot_ip", "192.168.137.52")}:8090') + pub.sendMessage('log', msg=f"[RemoteVision] Using command endpoint: {self.command_endpoint}") + pub.sendMessage('log', msg=f"[RemoteVision] Connecting to remote stream: {self.remote_source}") + self.video = VideoStream(source_type="remote", remote_source=self.remote_source, **kwargs).start() + + self.mode = kwargs.get('mode', RemoteVision.MODE_MOTION) + self.path = kwargs.get('path', '/') + self.flip = kwargs.get('flip', False) + self.rotate = kwargs.get('rotate', False) + self.preview = kwargs.get('preview', False) + self.accuracy = kwargs.get('accuracy', 10) + self.static_back = None + self.fps = FPS().start() + pub.subscribe(self.exit, "exit") + + # Add new parameters for laptop-based processing + self.laptop_processing = kwargs.get('laptop_processing', False) + self.priority_persons = kwargs.get('priority_persons', []) + + # For tracking which persons were seen and their priority status + self.detected_persons = {} + self.last_detection_time = {} + + if self.mode == RemoteVision.MODE_FACES: + try: + self.cascade_path = self.path + "/modules/vision/haarcascade_frontalface_default.xml" + self.cascade = cv2.CascadeClassifier(self.cascade_path) + self.faces = Faces(detector=self.cascade, path=self.path) + except Exception as e: + pub.sendMessage('log:error', msg=f"[RemoteVision] Error initializing face detection: {str(e)}") + # Fall back to motion detection + self.mode = RemoteVision.MODE_MOTION + + self.running = True + pub.sendMessage('log', msg=f"[RemoteVision] Initialized with {self.dimensions[0]}x{self.dimensions[1]} resolution") + + def update_camera_pipeline(self, pipeline): + """Update the camera pipeline (called from external test tools)""" + if hasattr(self, 'video') and hasattr(self.video, 'update_pipeline'): + pub.sendMessage('log', msg=f"[RemoteVision] Updating camera pipeline to: {pipeline}") + 
self.video.update_pipeline(pipeline)
+        else:
+            pub.sendMessage('log', msg="[RemoteVision] Unable to update camera pipeline - incompatible video stream")
+
+    def exit(self):
+        self.running = False
+        if self.video and hasattr(self.video, "stop"):
+            self.video.stop()
+        cv2.destroyAllWindows()
+        self.fps.stop()
+        pub.sendMessage("log", msg="[RemoteVision] Approx. FPS: {:.2f}".format(self.fps.fps()))
+
+    def detect(self):
+        if not self.running:
+            return
+        self.fps.update()
+        frame = self.video.read()
+        if frame is None or frame.size == 0:
+            pub.sendMessage('log', msg="[RemoteVision] Stream ends prematurely (no frame); attempting to reconnect...")
+            try:
+                self.video.stop()
+            except Exception:
+                pass
+            import time
+            time.sleep(1)
+            # Only remote streams can be redialled here; a local Pi camera has no
+            # remote_source attribute to reconnect to
+            if hasattr(self, 'remote_source'):
+                self.video = VideoStream(source_type="remote", remote_source=self.remote_source, **{'resolution': self.dimensions}).start()
+            return
+
+        try:
+            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        except Exception as e:
+            pub.sendMessage('log', msg=f"[RemoteVision] Error converting frame: {str(e)}")
+            return
+
+        matches = []
+        names = []
+
+        if self.mode == RemoteVision.MODE_FACES:
+            try:
+                matches = self.cascade.detectMultiScale(
+                    gray,
+                    scaleFactor=1.1,
+                    minNeighbors=self.accuracy,
+                    minSize=(30, 30)
+                )
+                if len(matches) > 0:
+                    cnt = 0
+                    for (x, y, w, h) in matches:
+                        cropped = frame[y:y+h, x:x+w]
+                        cnt += 1
+                        names += self.faces.detect(cropped, [(0, 0, w, h)], cnt == len(matches))
+            except Exception as e:
+                pub.sendMessage('log', msg=f"[RemoteVision] Error in face detection: {str(e)}")
+
+        elif self.mode == RemoteVision.MODE_MOTION:
+            try:
+                gray_blur = cv2.GaussianBlur(gray, (21, 21), 0)
+                if self.static_back is None:
+                    self.static_back = gray_blur
+                    return
+                diff_frame = cv2.absdiff(self.static_back, gray_blur)
+                thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
+                thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
+                contours, _ = cv2.findContours(thresh_frame.copy(),
+                                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+                for contour in contours:
+                    if cv2.contourArea(contour) < 10000:
+                        continue
+                    (x, y, w, h) = cv2.boundingRect(contour)
+                    matches.append((x, y, w, h))
+            except Exception as e:
+                pub.sendMessage('log', msg=f"[RemoteVision] Error in motion detection: {str(e)}")
+
+        if self.preview and frame is not None:
+            self.render(frame, matches, names)
+        return matches
+
+    def render(self, frame, matches, names):
+        index = 0
+        for (x, y, w, h) in matches:
+            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
+            if names and index < len(names):
+                label_y = y - 15 if y - 15 > 15 else y + 15
+                cv2.putText(frame, names[index], (x, label_y),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
+            index += 1
+        try:
+            cv2.imshow("Remote Preview", frame)
+            cv2.waitKey(1)
+        except Exception as e:
+            pub.sendMessage('log', msg=f"[RemoteVision] Error displaying preview: {str(e)}")
+
+    def get_area(self, match):
+        if match is not None:
+            x, y, w, h = match
+            return float(w) * float(h)
+        return 0
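+
+    # The laptop-side payload consumed below is a plain dict; 'count', 'names'
+    # and 'priority_names' are the keys this method actually reads. A minimal
+    # example of a call (field values are illustrative only):
+    #
+    #   vision.process_face_data({'count': 2,
+    #                             'names': ['alice', 'bob'],
+    #                             'priority_names': ['alice']})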
+    def process_face_data(self, face_data):
+        """Process face detection data from the laptop
+
+        This method is called when the laptop sends face recognition data
+        through the command channel, letting the robot know about detected faces
+        without doing local processing.
+        """
+        if not self.laptop_processing:
+            return
+
+        # 'time' is only imported locally elsewhere in this file, so import it
+        # here too before calling time.time()
+        import time
+
+        try:
+            count = face_data.get('count', 0)
+            names = face_data.get('names', [])
+            priority_names = face_data.get('priority_names', [])
+
+            now = time.time()
+
+            # Update detection tracking
+            for name in names:
+                self.detected_persons[name] = now
+
+            # Process the highest-priority person for tracking
+            if priority_names:
+                # If there are multiple priority persons, choose the one with the highest priority
+                highest_priority = priority_names[0]  # Default to first
+
+                # Publish tracking info for the robot's servos
+                pub.sendMessage('priority_person:detected', name=highest_priority)
+
+                # Log the detection
+                pub.sendMessage('log', msg=f"[RemoteVision] Detected priority person: {highest_priority}")
+
+            elif names:
+                # No priority persons, but regular persons were detected
+                pub.sendMessage('log', msg=f"[RemoteVision] Detected {count} persons: {', '.join(names)}")
+
+            # Clean up old detections (older than 5 seconds)
+            for name in list(self.detected_persons.keys()):
+                if now - self.detected_persons[name] > 5:
+                    del self.detected_persons[name]
+
+        except Exception as e:
+            pub.sendMessage('log:error', msg=f"[RemoteVision] Error processing face data: {e}")
+
+    def set_priority_persons(self, persons):
+        """Update the list of priority persons"""
+        self.priority_persons = persons
+        pub.sendMessage('log', msg=f"[RemoteVision] Updated {len(persons)} priority persons")
diff --git a/tests/test_sensor.py b/tests/test_sensor.py
new file mode 100644
index 00000000..8a2ea654
--- /dev/null
+++ b/tests/test_sensor.py
@@ -0,0 +1,33 @@
+import unittest
+from unittest.mock import patch, MagicMock
+
+# Mock the gpiozero and pubsub libraries
+import sys
+sys.modules['gpiozero'] = MagicMock()
+sys.modules['pubsub'] = MagicMock()
+sys.modules['pubsub.pub'] = MagicMock()
+
+from modules.sensor import Sensor
+
+class TestSensor(unittest.TestCase):
+    def setUp(self):
+        self.pin = 4
+        self.sensor = Sensor(pin=self.pin)
+
+    @patch('modules.sensor.MotionSensor')
+    def test_init(self, MockMotionSensor):
+        sensor_instance = MockMotionSensor.return_value
+        sensor = Sensor(pin=self.pin)
+        self.assertEqual(sensor.pin, self.pin)
+        self.assertEqual(sensor.sensor, sensor_instance)
+
+    @patch('modules.sensor.MotionSensor')
+    def test_read(self, MockMotionSensor):
+        sensor_instance = MockMotionSensor.return_value
+        sensor_instance.motion_detected = True
+        sensor = Sensor(pin=self.pin)
+        self.assertTrue(sensor.read())
+        self.assertTrue(sensor.value)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file