diff --git a/.github/workflows/dev-env-setup.yml b/.github/workflows/dev-env-setup.yml deleted file mode 100644 index 4c25c8ce..00000000 --- a/.github/workflows/dev-env-setup.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: Test Nixopus Development Environment Setup - -on: - push: - branches: - [master, feat/develop, feat/dev_environment, feat/dev_environment-macos, test-feat] - paths: - - 'scripts/setup.sh' - - '.github/workflows/dev-env-setup.yml' - workflow_dispatch: - -jobs: - setup-environment: - strategy: - matrix: - os: [ubuntu-latest] - runs-on: ${{ matrix.os }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker - if: runner.os == 'Linux' - uses: docker/setup-docker-action@v3 - - # - name: Set up Docker (macOS-13) - # if: runner.os == 'macOS' - # uses: docker/setup-docker-action@v4 - # env: - # LIMA_START_ARGS: --cpus 4 --memory 8 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Install Docker Compose - run: | - sudo apt-get update - sudo apt-get install -y ca-certificates curl gnupg - sudo install -m 0755 -d /etc/apt/keyrings - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - sudo chmod a+r /etc/apt/keyrings/docker.gpg - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install -y docker-compose-plugin - docker compose version - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-pip git openssl openssh-server - - # - name: Install deps (macOS-13) - # if: runner.os == 'macOS' - # run: | - # brew update - # brew install python git openssl - - - name: Make setup script executable - run: chmod +x scripts/setup.sh - - - name: Run setup script - run: | - if [[ "$RUNNER_OS" == "Linux" ]]; then - sudo ./scripts/setup.sh - else - ./scripts/setup.sh - fi diff --git a/.github/workflows/format.yaml b/.github/workflows/format.yaml index f171f810..22be4523 100644 --- a/.github/workflows/format.yaml +++ b/.github/workflows/format.yaml @@ -8,6 +8,7 @@ on: paths: - 'api/**' - 'view/**' + - 'cli/**' workflow_dispatch: permissions: @@ -92,4 +93,48 @@ jobs: skip_dirty_check: false skip_fetch: true skip_checkout: true + disable_globbing: false + + format-cli: + name: Format CLI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + token: ${{ secrets.GITHUB_TOKEN }} + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: latest + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install dependencies + working-directory: cli + run: poetry install --with dev --quiet + + - name: Run formatting + working-directory: cli + run: make format + + - name: Commit CLI changes + id: cli-commit + if: github.event_name == 'pull_request' + uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: 'style(cli): format Python code' + branch: ${{ github.head_ref }} + file_pattern: 'cli/**/*.py' + commit_user_name: 'github-actions[bot]' + commit_user_email: 'github-actions[bot]@users.noreply.github.com' + skip_dirty_check: false + skip_fetch: true + skip_checkout: 
true disable_globbing: false \ No newline at end of file diff --git a/.github/workflows/greetings.yaml b/.github/workflows/greetings.yaml deleted file mode 100644 index d19553ce..00000000 --- a/.github/workflows/greetings.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: Greetings - -on: [pull_request_target, issues] - -jobs: - greeting: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/first-interaction@v1 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - issue-message: "Thank you for creating your first issue! We appreciate your contribution and will review it soon. Please ensure you've provided all necessary details and followed the issue template." - pr-message: "Thank you for your first pull request! Before we review, please ensure your code follows our quality standards: run tests, check formatting, and verify linting. We'll review your changes as soon as possible." \ No newline at end of file diff --git a/.github/workflows/qemu.yml b/.github/workflows/qemu.yml deleted file mode 100644 index 56ea8941..00000000 --- a/.github/workflows/qemu.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: QEMU Setup and Installation - -on: - push: - branches: [master, feat/develop] - paths: - - 'scripts/install.sh' - - 'installer/**' - - '.github/workflows/qemu.yml' - workflow_dispatch: - -env: - ADMIN_EMAIL: admin@nixopus.local - ADMIN_PASSWORD: Nixopus@123!Secure - -jobs: - setup-environment: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker - uses: docker/setup-docker-action@v3 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Install Docker Compose - run: | - sudo apt-get update - sudo apt-get install -y ca-certificates curl gnupg - sudo install -m 0755 -d /etc/apt/keyrings - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - sudo chmod a+r /etc/apt/keyrings/docker.gpg - echo \ - "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ - "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install -y docker-compose-plugin - docker compose version - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-pip git openssl - - - name: Make install script executable - run: chmod +x scripts/install.sh - - test-installation: - needs: setup-environment - runs-on: ubuntu-latest - strategy: - matrix: - include: - # Basic combinations - - name: minimal - env: production - email: "" - password: "" - api_domain: "" - app_domain: "" - - name: email-only - env: production - email: admin@nixopus.local - password: "" - api_domain: "" - app_domain: "" - - name: password-only - env: production - email: "" - password: Nixopus@123!Secure - api_domain: "" - app_domain: "" - - name: email-password - env: production - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: "" - app_domain: "" - - # Domain combinations - - name: api-domain-only - env: production - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: api.nixopus.local - app_domain: "" - - name: app-domain-only - env: production - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: "" - app_domain: app.nixopus.local - - name: both-domains - env: production - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: api.nixopus.local - app_domain: app.nixopus.local - - # Staging combinations - - name: staging-minimal - env: staging - email: "" - password: "" - api_domain: "" - app_domain: "" - - name: staging-email-password - env: staging - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: "" - app_domain: "" - - name: staging-api-domain-only - env: staging - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: api.staging.nixopus.local - app_domain: "" - - name: staging-app-domain-only - env: staging - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: "" - app_domain: app.staging.nixopus.local - - name: staging-both-domains - env: staging - email: admin@nixopus.local - password: Nixopus@123!Secure - api_domain: api.staging.nixopus.local - app_domain: app.staging.nixopus.local - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Run install script - run: | - cmd="./scripts/install.sh" - - if [ -n "${{ matrix.email }}" ]; then - cmd="$cmd --email=${{ matrix.email }}" - fi - - if [ -n "${{ matrix.password }}" ]; then - cmd="$cmd --password=${{ matrix.password }}" - fi - - if [ -n "${{ matrix.api_domain }}" ]; then - cmd="$cmd --api-domain=${{ matrix.api_domain }}" - fi - - if [ -n "${{ matrix.app_domain }}" ]; then - cmd="$cmd --app-domain=${{ matrix.app_domain }}" - fi - - cmd="$cmd --env=${{ matrix.env }}" - - echo "Running command: $cmd" - sudo $cmd \ No newline at end of file diff --git a/.github/workflows/release-cli.yml b/.github/workflows/release-cli.yml new file mode 100644 index 00000000..e2fef92b --- /dev/null +++ b/.github/workflows/release-cli.yml @@ -0,0 +1,293 @@ +name: Build and Package CLI + +on: + push: + branches: [feat/cli_releaser] + paths: + - "cli/**" + - ".github/workflows/build-and-package-cli.yml" + workflow_dispatch: + +jobs: + build-and-package: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.VERSION }} + strategy: + matrix: + include: + - arch: x86_64 + python-arch: x64 + - arch: aarch64 + python-arch: x64 + + steps: + - name: 
Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + architecture: ${{ matrix.python-arch }} + + - name: Set up QEMU for cross-compilation + if: matrix.arch == 'aarch64' + uses: docker/setup-qemu-action@v3 + with: + platforms: arm64 + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: latest + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Cache dependencies + uses: actions/cache@v3 + with: + path: cli/.venv + key: venv-${{ runner.os }}-${{ hashFiles('cli/poetry.lock') }} + + - name: Install dependencies + working-directory: cli + run: poetry install + + - name: Extract version from pyproject.toml + id: version + working-directory: cli + run: | + VERSION=$(poetry version --short) + echo "VERSION=$VERSION" >> $GITHUB_OUTPUT + echo "Extracted version: $VERSION" + + - name: Install Ruby and fpm + run: | + sudo apt-get update + sudo apt-get install -y ruby ruby-dev rubygems build-essential + sudo gem install --no-document fpm + + - name: Build CLI binary + working-directory: cli + run: | + chmod +x build.sh + if [ ! -d "helpers" ]; then + ln -s ../helpers helpers + fi + ./build.sh --no-test --no-cleanup + + - name: Prepare binary for packaging + working-directory: cli + run: | + mkdir -p packaging/usr/local/bin + if [ -f "dist/nixopus" ]; then + cp dist/nixopus packaging/usr/local/bin/ + if [ -d "dist/nixopus_linux_amd64" ]; then + cp -r dist/nixopus_linux_amd64 packaging/usr/local/bin/ + elif [ -d "dist/nixopus_linux_arm64" ]; then + cp -r dist/nixopus_linux_arm64 packaging/usr/local/bin/ + fi + else + echo "Build output not found in expected location" + ls -la dist/ + exit 1 + fi + chmod +x packaging/usr/local/bin/nixopus + + - name: Set architecture variables + run: | + echo "ARCH=${{ matrix.arch }}" >> $GITHUB_ENV + echo "PKG_ARCH=${{ matrix.arch == 'aarch64' && 'arm64' || 'amd64' }}" >> $GITHUB_ENV + + - name: Create DEB package + working-directory: cli + run: | + fpm -s dir -t deb \ + -n nixopus \ + -v "${{ steps.version.outputs.VERSION }}" \ + -a "$PKG_ARCH" \ + --description "A CLI for Nixopus" \ + --maintainer "Nixopus " \ + --license "FSL" \ + --url "https://github.com/nixopus/cli" \ + --deb-priority optional \ + --deb-compression bzip2 \ + --prefix / \ + packaging/=/ + + - name: Create RPM package + working-directory: cli + run: | + fpm -s dir -t rpm \ + -n nixopus \ + -v "${{ steps.version.outputs.VERSION }}" \ + -a "$PKG_ARCH" \ + --description "A CLI for Nixopus" \ + --maintainer "Nixopus " \ + --license "FSL" \ + --url "https://github.com/nixopus/cli" \ + --rpm-compression bzip2 \ + --prefix / \ + packaging/=/ + + - name: Create TAR.GZ package + working-directory: cli + run: | + fpm -s dir -t tar \ + -n nixopus \ + -v "${{ steps.version.outputs.VERSION }}" \ + -a "$PKG_ARCH" \ + --description "A CLI for Nixopus" \ + --maintainer "Nixopus " \ + --license "FSL" \ + --url "https://github.com/nixopus/cli" \ + --prefix / \ + packaging/=/ + + - name: Create APK package + working-directory: cli + run: | + fpm -s dir -t apk \ + -n nixopus \ + -v "${{ steps.version.outputs.VERSION }}" \ + -a "$PKG_ARCH" \ + --description "A CLI for Nixopus" \ + --maintainer "Nixopus " \ + --license "FSL" \ + --url "https://github.com/nixopus/cli" \ + --prefix / \ + packaging/=/ + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: nixopus-packages-${{ matrix.arch }} + path: | + cli/*.deb + cli/*.rpm + cli/*.tar + cli/*.apk + retention-days: 30 + + 
create-release: + needs: build-and-package + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/feat/cli_releaser' && github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + fetch-depth: 0 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: ./artifacts + + - name: Update root package.json with CLI version + run: | + CLI_VERSION="${{ needs.build-and-package.outputs.version }}" + echo "Updating root package.json with CLI version: $CLI_VERSION" + + # Collect all package file names + PACKAGE_FILES=() + while IFS= read -r -d '' file; do + filename=$(basename "$file") + PACKAGE_FILES+=("$filename") + done < <(find ./artifacts -type f \( -name "*.deb" -o -name "*.rpm" -o -name "*.tar" -o -name "*.apk" \) -print0) + + echo "Found package files: ${PACKAGE_FILES[@]}" + + # Convert array to JSON format + PACKAGES_JSON=$(printf '%s\n' "${PACKAGE_FILES[@]}" | jq -R . | jq -s .) + echo "Packages JSON: $PACKAGES_JSON" + + # Update package.json with version and package list + jq --arg version "$CLI_VERSION" \ + --argjson packages "$PACKAGES_JSON" \ + '. + {"cli-version": $version, "cli-packages": $packages}' \ + package.json > package.json.tmp + mv package.json.tmp package.json + + echo "Updated package.json content:" + jq '.["cli-version"], .["cli-packages"]' package.json + + - name: Create Pull Request for package.json update + run: | + CLI_VERSION="${{ needs.build-and-package.outputs.version }}" + BRANCH_NAME="update-cli-version-$CLI_VERSION" + + # Configure git + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + # Check if changes exist + if git diff --quiet package.json; then + echo "No changes to package.json, skipping PR creation" + exit 0 + fi + + # Create new branch + git checkout -b "$BRANCH_NAME" + + # Add and commit changes + git add package.json + git commit -m "chore: update cli-version to $CLI_VERSION and cli-packages array + + - Updated cli-version to $CLI_VERSION + - Updated cli-packages array with available package files + + This PR was automatically created by the CLI release workflow to keep the root package.json in sync with the CLI version from pyproject.toml." + + # Push branch + git push origin "$BRANCH_NAME" + + # Create PR (only if branch doesn't exist) + if ! gh pr list --head "$BRANCH_NAME" --json number --jq '. | length' | grep -q "0"; then + echo "PR already exists for branch $BRANCH_NAME" + exit 0 + fi + + gh pr create \ + --title "chore: update cli-version to $CLI_VERSION" \ + --body "This PR updates the \`cli-version\` field and \`cli-packages\` array in the root package.json to match the current CLI version ($CLI_VERSION) from the pyproject.toml file. + + **Changes:** + - Updated \`cli-version\` field to \`$CLI_VERSION\` + - Updated \`cli-packages\` array with available package files + + **Available Packages:** + $(jq -r '.["cli-packages"][]' package.json | sed 's/^/- /') + + **Download URLs:** + $(jq -r '.["cli-packages"][]' package.json | sed "s|^|- https://github.com/raghavyuva/nixopus/releases/download/nixopus-$CLI_VERSION/|") + + This PR was automatically created by the CLI release workflow." 
\ + --base master \ + --head "$BRANCH_NAME" \ + --label "automated" \ + --label "cli" || echo "Failed to create PR, but continuing with release" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Create GitHub Release + run: | + CLI_VERSION="${{ needs.build-and-package.outputs.version }}" + RELEASE_TAG="nixopus-$CLI_VERSION" + + echo "Creating release with tag: $RELEASE_TAG" + echo "Files to upload:" + find ./artifacts -type f \( -name "*.deb" -o -name "*.rpm" -o -name "*.tar" -o -name "*.apk" \) -exec ls -la {} \; + + gh release create "$RELEASE_TAG" \ + --title "Nixopus CLI v$CLI_VERSION" \ + --notes "Nixopus CLI v$CLI_VERSION - Packages: DEB, RPM, TAR, APK for x86_64 and ARM64" \ + --target ${{ github.ref_name }} \ + --prerelease + + find ./artifacts -type f \( -name "*.deb" -o -name "*.rpm" -o -name "*.tar" -o -name "*.apk" \) -exec gh release upload "$RELEASE_TAG" {} \; + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 89ee350c..093d15e3 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,52 @@ docs/.vitepress/cache !api/.env.sample **.log .vscode + +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +venv/ +env/ +ENV/ +env.bak/ +venv.bak/ +.venv/ + +.idea/ + +.pytest_cache/ +.coverage +htmlcov/ + +.ipynb_checkpoints + +.env.local +.env.development.local +.env.test.local +.env.production.local + +# Poetry +poetry.lock + +**/.DS_Store +.DS_Store diff --git a/README.md b/README.md index 0403711d..2ec53fb6 100644 --- a/README.md +++ b/README.md @@ -47,55 +47,10 @@ ## Project Overview - -Nixopus is a powerful platform designed to simplify VPS management. Whether you're a DevOps engineer, system administrator, or developer, Nixopus streamlines your workflow with comprehensive tools for deployment, monitoring, and maintenance. +Nixopus streamlines your workflow with comprehensive tools for deployment, monitoring, and maintenance. > ⚠️ **Important Note**: Nixopus is currently in alpha/pre-release stage and is not yet ready for production use. While you're welcome to try it out, we recommend waiting for the beta or stable release before using it in production environments. The platform is still undergoing testing and development. -## Features - -- **Simplified VPS management** - - *1 Click Application Deployment*: Deploy applications effortlessly with a single click. - - *Integrated Web-Based Terminal*: Access your server's terminal directly from the browser. - - *Intuitive File Manager*: Navigate and manage server files through a user-friendly interface. - - *Real Time Monitoring*: Monitor your server's CPU, RAM, containers usage in real-time. - - *Built in TLS Management*: Configure & manage TLS certificates for your domains. - - *GitHub Integration for CI/CD*: Seamlessly integrate GitHub repositories. - - *Proxy Management via Caddy*: Configure and manage reverse proxies. - - *Notification Integration*: Configure to send real-time alerts to channels including Slack, Discord, or Email. 
-- **Comprehensive deployment tools** -- **User-friendly interface** -- **Customizable installation options** -- **Self Host Deployment** - -## Table of Contents - -- [Project Overview](#project-overview) -- [Features](#features) -- [Table of Contents](#table-of-contents) -- [Demo / Screenshots](#demo--screenshots) -- [Installation \& Quick Start](#installation--quick-start) - - [Optional Parameters](#optional-parameters) - - [Accessing Nixopus](#accessing-nixopus) -- [Usage](#usage) -- [Architecture](#architecture) -- [Development Guide](#development-guide) - - [Development Setup](#development-setup) - - [Running the Application](#running-the-application) - - [Making Changes](#making-changes) - - [Submitting a Pull Request](#submitting-a-pull-request) - - [Proposing New Features](#proposing-new-features) - - [Extending Documentation](#extending-documentation) -- [Contribution Guidelines](#contribution-guidelines) -- [Sponsorship](#sponsorship) -- [Community \& Support](#community--support) -- [License](#license) -- [Code of Conduct](#code-of-conduct) -- [Acknowledgments](#acknowledgments) -- [About the Name](#about-the-name) -- [Contributors](#contributors) -- [🎗️ Sponsors](#️-sponsors) - ## Demo / Screenshots | Self Host Stats | Team Display | File Manager | @@ -106,6 +61,17 @@ Nixopus is a powerful platform designed to simplify VPS management. Whether you' | :-: | :-: | :-: | | Self Host Logs | Dashboard Overview | Notification Preferences | +# Features + +- **Deploy apps with one click.** No config files, no SSH commands. +- **Manage files in your browser.** Drag, drop, edit. Like any file manager. +- **Built-in terminal.** Access your server without leaving the page. +- **Real-time monitoring.** See CPU, RAM, disk usage at a glance. +- **Auto SSL certificates.** Your domains get HTTPS automatically. +- **GitHub integration.** Push code → auto deploy. +- **Proxy management.** Route traffic with Caddy reverse proxy. +- **Smart alerts.** Get notified via Slack, Discord, or email when something's wrong. + ## Installation & Quick Start This section will help you set up Nixopus on your VPS quickly. @@ -136,167 +102,6 @@ sudo bash -c "$(curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/r --env production ``` -#### Accessing Nixopus - -After successful installation, you can access the Nixopus dashboard by visiting the URL you specified in the `--app-domain` parameter (e.g., `https://nixopus.example.tld`). Use the email and password you provided during installation to log in. - -> **Note**: The installation script has not been tested in all distributions and different operating systems. If you encounter any issues during installation, please create an issue on our [GitHub repository](https://github.com/raghavyuva/nixopus/issues) with details about your environment and the error message you received. - -## Usage - -Once installed, Nixopus provides a dashboard for managing your VPS. You can deploy applications, monitor performance, and perform maintenance tasks directly from the interface. - -## Architecture - -Nixopus is built using a microservices architecture, leveraging Go for backend services and React for the frontend. It uses PostgreSQL for data storage and is designed to be scalable and efficient. To learn more about the architecture, refer to the [Architecture Overview](https://docs.nixopus.com/architecture) section in the documentation. - -## Development Guide - -### Development Setup - -1. 
Clone the repository: - -```bash -git clone https://github.com/raghavyuva/nixopus.git -cd nixopus -``` - -2. Install Go (version 1.23.6 or newer), and PostgreSQL. - -3. Set up PostgreSQL databases: - -```bash -createdb postgres -U postgres - -createdb nixopus_test -U postgres -``` - -4. Copy and configure environment variables: - -```bash -cp .env.sample .env -``` - -5. Install project dependencies: - -```bash -cd api -go mod download - -cd ../view -yarn install -``` - -6. Load development fixtures (optional but recommended): - -```bash -cd ../api -# Load fixtures without affecting existing data -make fixtures-load -``` - -The fixtures system provides sample data including users, organizations, roles, permissions, and feature flags to help you get started quickly with development. Make Changes to Fixtures data in `/api/fixtures/development` directory if required. [Read More About Fixtures](https://docs.nixopus.com/contributing/fixtures) - -### Running the Application - -1. Start the API service: - -```bash -cd api -air -``` - -2. Start the view service: - -```bash -cd view -yarn dev -``` - -### Making Changes - -Nixopus follows [Forking-Workflow]([https://www.atlassian.com/continuous-delivery/continuous-integration/trunk-based-development](https://www.atlassian.com/git/tutorials/comparing-workflows/forking-workflow)) conventions. - -1. Create a new branch: - -```bash -git checkout -b feature/your-feature-name -``` - -2. Make your changes following the project structure: - - Place new features under `api/internal/features/` - - Add tests for new functionality - - Update migrations if needed - - Follow existing patterns for controllers, services, and storage - - For frontend changes, follow the Next.js app directory structure - -3. Run tests: - -```bash -cd api -make test - -# View linting -cd view -yarn lint -``` - -4. Commit your changes with clear messages. - ---- - -### Submitting a Pull Request - -1. Push your branch and create a pull request. -2. Ensure your code: - - Follows the project structure - - Includes tests - - Updates documentation if needed - - Passes all CI checks -3. Be prepared to address feedback. - -### Proposing New Features - -1. Check existing issues and pull requests. - -2. Create a new issue with the `Feature request` template. - -3. Include: - - Feature description - - Technical implementation details - - Impact on existing code - -### Extending Documentation - -Documentation is located in the `docs/` directory. Follow the existing structure and style when adding new content. - -## Contribution Guidelines - -Thank you for your interest in contributing to Nixopus! This [guide](docs/contributing/README.md) will help you get started with the development setup and explain the contribution process. - -## Sponsorship - -We've dedicated significant time to making Nixopus free and accessible. Your support helps us continue our development and vision for open source. Consider becoming a sponsor and join our community of supporters. - -- ![GitHub Sponsors](https://img.shields.io/github/sponsors/raghavyuva?label=Github%20Sponsor) -- Donate to raghavyuva via Liberapay - -## Community & Support - -If you find Nixopus useful, please consider giving it a star and sharing it with your network! - -## License - -Nixopus is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for more information. - -## Code of Conduct - -Before contributing, please review and agree to our [Code of Conduct](/docs/code-of-conduct/index.md). 
We're committed to maintaining a welcoming and inclusive community. - -## Acknowledgments - -We would like to thank all contributors and supporters of Nixopus. Your efforts and feedback are invaluable to the project's success. - ## About the Name Nixopus is derived from the combination of "octopus" and the Linux penguin (Tux). While the name might suggest a connection to [NixOS](https://nixos.org/), Nixopus is an independent project with no direct relation to NixOS or its ecosystem. @@ -307,8 +112,6 @@ Nixopus is derived from the combination of "octopus" and the Linux penguin (Tux) Nixopus project contributors -Made with [contrib.rocks](https://contrib.rocks). - ## 🎗️ Sponsors diff --git a/api/api/versions.json b/api/api/versions.json index c9a8d8a6..27bb3626 100644 --- a/api/api/versions.json +++ b/api/api/versions.json @@ -3,7 +3,7 @@ { "version": "v1", "status": "active", - "release_date": "2025-07-10T02:54:43.011943+05:30", + "release_date": "2025-08-06T17:31:26.311348+05:30", "end_of_life": "0001-01-01T00:00:00Z", "changes": [ "Initial API version" diff --git a/assets/cover.png b/assets/cover.png deleted file mode 100644 index 6c06de3e..00000000 Binary files a/assets/cover.png and /dev/null differ diff --git a/assets/nixopus_logo_bottom_text.png b/assets/nixopus_logo_bottom_text.png deleted file mode 100644 index a8d585dd..00000000 Binary files a/assets/nixopus_logo_bottom_text.png and /dev/null differ diff --git a/assets/nixopus_logo_side_text.png b/assets/nixopus_logo_side_text.png deleted file mode 100644 index ebc9e5fe..00000000 Binary files a/assets/nixopus_logo_side_text.png and /dev/null differ diff --git a/assets/nixopus_logo_violet_variant.png b/assets/nixopus_logo_violet_variant.png deleted file mode 100644 index 11e172af..00000000 Binary files a/assets/nixopus_logo_violet_variant.png and /dev/null differ diff --git a/cli/Makefile b/cli/Makefile new file mode 100644 index 00000000..00587716 --- /dev/null +++ b/cli/Makefile @@ -0,0 +1,73 @@ +.PHONY: help setup test test-cov lint clean format check build publish dev nixopus + +help: ## Show available commands + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' + +setup: ## Setup Python environment and install dependencies + @if command -v poetry >/dev/null 2>&1; then \ + echo "Poetry found. Installing dependencies..."; \ + poetry install --with dev --quiet; \ + echo "Environment ready! Use: make nixopus ARGS=\"command\""; \ + else \ + echo "Poetry not found. Installing Poetry..."; \ + curl -sSL https://install.python-poetry.org | python3 - >/dev/null 2>&1; \ + echo "Poetry installed. Please restart your shell or run: source ~/.bashrc (or ~/.zshrc)"; \ + echo "Then run 'make setup' again to install dependencies."; \ + fi + +test: ## Run tests + @poetry run pytest + +test-cov: ## Run tests with coverage + @poetry run pytest --cov=app --cov-report=term-missing --cov-report=html + +lint: ## Run linting + @poetry run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + @poetry run flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + +format: ## Format code + @poetry run black . --quiet + @poetry run isort . --quiet + +check: ## Run linting and tests + $(MAKE) lint && $(MAKE) test + +clean: ## Clean build artifacts + @rm -rf build/ dist/ *.egg-info/ .pytest_cache/ htmlcov/ .coverage + @find . -type d -name __pycache__ -delete + @find . 
-type f -name "*.pyc" -delete + +build: ## Build the package + @poetry build + +publish: ## Publish to PyPI + @poetry publish + +dev: ## Activate development shell + @poetry shell + + +# ----------------------------------------------------------------------------- +# Nixopus test CLI commands +# ----------------------------------------------------------------------------- +nixopus: ## Run nixopus CLI + @if [ -z "$(ARGS)" ]; then \ + poetry run nixopus --help; \ + else \ + poetry run nixopus $(ARGS); \ + fi + +conflict: ## Run conflict command + @poetry run nixopus conflict $(ARGS) + +preflight: ## Run preflight command + @poetry run nixopus preflight $(ARGS) + +version: ## Show version + @poetry run nixopus version + +run: ## Run nixopus CLI directly + poetry run nixopus + +generate-docs: ## Generate CLI documentation + typer app.main utils docs --output ../docs/cli/cli-reference.md --name nixopus diff --git a/cli/README.md b/cli/README.md new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/cli/README.md @@ -0,0 +1 @@ + diff --git a/cli/__init__.py b/cli/__init__.py new file mode 100644 index 00000000..64647441 --- /dev/null +++ b/cli/__init__.py @@ -0,0 +1 @@ +# cli main module diff --git a/cli/app/__init__.py b/cli/app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/app/commands/__init__.py b/cli/app/commands/__init__.py new file mode 100644 index 00000000..0aac0e26 --- /dev/null +++ b/cli/app/commands/__init__.py @@ -0,0 +1 @@ +# cli commands module diff --git a/cli/app/commands/clone/__init__.py b/cli/app/commands/clone/__init__.py new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/cli/app/commands/clone/__init__.py @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/cli/app/commands/clone/clone.py b/cli/app/commands/clone/clone.py new file mode 100644 index 00000000..f39d2c94 --- /dev/null +++ b/cli/app/commands/clone/clone.py @@ -0,0 +1,258 @@ +import os +import subprocess +from typing import Optional, Protocol + +from pydantic import BaseModel, Field, field_validator + +from app.utils.lib import DirectoryManager +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .messages import ( + debug_cloning_repo, + debug_executing_git_clone, + debug_git_clone_success, + debug_git_clone_failed, + debug_unexpected_error, + debug_removing_directory, + debug_directory_removal_failed, + debug_path_exists_force_disabled, + debug_clone_completed, + default_branch, + dry_run_branch, + dry_run_command, + dry_run_command_would_be_executed, + dry_run_force_mode, + dry_run_mode, + dry_run_repository, + dry_run_target_path, + end_dry_run, + failed_to_prepare_target_directory, + invalid_path, + invalid_repo, + invalid_repository_url, + path_already_exists_use_force, + path_exists_will_overwrite, + path_exists_would_fail, + prerequisites_validation_failed, + successfully_cloned, + target_path_not_exists, + unknown_error, +) + + +class GitCloneProtocol(Protocol): + def clone_repository(self, repo: str, path: str, branch: str = None) -> tuple[bool, str]: ... 
+ + +class GitCommandBuilder: + @staticmethod + def build_clone_command(repo: str, path: str, branch: str = None) -> list[str]: + cmd = ["git", "clone"] + if branch: + cmd.extend(["-b", branch]) + cmd.extend([repo, path]) + return cmd + + +class CloneFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, result: "CloneResult", output: str) -> str: + if result.success: + message = successfully_cloned.format(repo=result.repo, path=result.path) + output_message = self.output_formatter.create_success_message(message, result.model_dump()) + else: + error = result.error or unknown_error + output_message = self.output_formatter.create_error_message(error, result.model_dump()) + + return self.output_formatter.format_output(output_message, output) + + def format_dry_run(self, config: "CloneConfig") -> str: + cmd = GitCommandBuilder.build_clone_command(config.repo, config.path, config.branch) + + output = [] + output.append(dry_run_mode) + output.append(dry_run_command_would_be_executed) + output.append(dry_run_command.format(command=" ".join(cmd))) + output.append(dry_run_repository.format(repo=config.repo)) + output.append(dry_run_branch.format(branch=config.branch or default_branch)) + output.append(dry_run_target_path.format(path=config.path)) + output.append(dry_run_force_mode.format(force=config.force)) + + self._add_path_status_message(output, config.path, config.force) + + output.append(end_dry_run) + return "\n".join(output) + + def _add_path_status_message(self, output: list[str], path: str, force: bool) -> None: + if os.path.exists(path): + if force: + output.append(path_exists_will_overwrite.format(path=path)) + else: + output.append(path_exists_would_fail.format(path=path)) + else: + output.append(target_path_not_exists.format(path=path)) + + +class GitClone: + def __init__(self, logger: LoggerProtocol): + self.logger = logger + + def clone_repository(self, repo: str, path: str, branch: str = None) -> tuple[bool, str]: + cmd = GitCommandBuilder.build_clone_command(repo, path, branch) + + self.logger.debug(debug_executing_git_clone.format(command=' '.join(cmd))) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + self.logger.debug(debug_git_clone_success) + return True, None + except subprocess.CalledProcessError as e: + self.logger.debug(debug_git_clone_failed.format(code=e.returncode, error=e.stderr)) + return False, e.stderr + except Exception as e: + self.logger.debug(debug_unexpected_error.format(error_type=type(e).__name__, error=str(e))) + return False, str(e) + + +class CloneResult(BaseModel): + repo: str + path: str + branch: Optional[str] + force: bool + verbose: bool + output: str = "" + success: bool = False + error: Optional[str] = None + + +class CloneConfig(BaseModel): + repo: str = Field(..., min_length=1, description="Repository URL to clone") + branch: Optional[str] = Field("master", description="Branch to clone") + path: str = Field(..., min_length=1, description="Target path for cloning") + force: bool = Field(False, description="Force overwrite if path exists") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format: text, json") + dry_run: bool = Field(False, description="Dry run mode") + + @field_validator("repo") + @classmethod + def validate_repo(cls, repo: str) -> str: + stripped_repo = repo.strip() + if not stripped_repo: + raise ValueError(invalid_repo) + + if not cls._is_valid_repo_format(stripped_repo): + raise 
ValueError(invalid_repository_url) + return stripped_repo + + @staticmethod + def _is_valid_repo_format(repo: str) -> bool: + return ( + repo.startswith(("http://", "https://", "git://", "ssh://")) + or (repo.endswith(".git") and not repo.startswith("github.com:")) + or ("@" in repo and ":" in repo and repo.count("@") == 1) + ) + + @field_validator("path") + @classmethod + def validate_path(cls, path: str) -> str: + stripped_path = path.strip() + if not stripped_path: + raise ValueError(invalid_path) + return stripped_path + + @field_validator("branch") + @classmethod + def validate_branch(cls, branch: str) -> Optional[str]: + if not branch: + return None + stripped_branch = branch.strip() + if not stripped_branch: + return None + return stripped_branch + + +class CloneService: + def __init__(self, config: CloneConfig, logger: LoggerProtocol = None, cloner: GitCloneProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.cloner = cloner or GitClone(self.logger) + self.formatter = CloneFormatter() + self.dir_manager = DirectoryManager() + + def _prepare_target_directory(self) -> bool: + if self.config.force and os.path.exists(self.config.path): + self.logger.debug(debug_removing_directory.format(path=self.config.path)) + success = self.dir_manager.remove_directory(self.config.path, self.logger) + if not success: + self.logger.debug(debug_directory_removal_failed) + return success + return True + + def _validate_prerequisites(self) -> bool: + if self.dir_manager.path_exists_and_not_force(self.config.path, self.config.force): + self.logger.debug(debug_path_exists_force_disabled.format(path=self.config.path)) + self.logger.error(path_already_exists_use_force.format(path=self.config.path)) + return False + return True + + def _create_result(self, success: bool, error: str = None) -> CloneResult: + result = CloneResult( + repo=self.config.repo, + path=self.config.path, + branch=self.config.branch, + force=self.config.force, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + ) + result.output = self.formatter.format_output(result, self.config.output) + return result + + def clone(self) -> CloneResult: + import time + start_time = time.time() + + self.logger.debug(debug_cloning_repo.format(repo=self.config.repo, path=self.config.path, force=self.config.force)) + + if not self._validate_prerequisites(): + return self._create_result(False, prerequisites_validation_failed) + + if not self._prepare_target_directory(): + return self._create_result(False, failed_to_prepare_target_directory) + + success, error = self.cloner.clone_repository(self.config.repo, self.config.path, self.config.branch) + + duration = time.time() - start_time + self.logger.debug(debug_clone_completed.format(duration=f"{duration:.2f}", success=success)) + + return self._create_result(success, error) + + def clone_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.clone() + return result.output + + +class Clone: + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger + self.formatter = CloneFormatter() + + def clone(self, config: CloneConfig) -> CloneResult: + service = CloneService(config, logger=self.logger) + return service.clone() + + def clone_and_format(self, config: CloneConfig) -> str: + service = CloneService(config, logger=self.logger) + return service.clone_and_format() + + def format_output(self, result: CloneResult, output: str) -> str: + 
return self.formatter.format_output(result, output) diff --git a/cli/app/commands/clone/command.py b/cli/app/commands/clone/command.py new file mode 100644 index 00000000..eeb00696 --- /dev/null +++ b/cli/app/commands/clone/command.py @@ -0,0 +1,104 @@ +import typer + +from app.utils.logger import Logger +from app.utils.config import Config, DEFAULT_REPO, DEFAULT_BRANCH, DEFAULT_PATH, NIXOPUS_CONFIG_DIR +from app.utils.timeout import TimeoutWrapper + +from .clone import Clone, CloneConfig +from .messages import ( + debug_clone_command_invoked, + debug_repo_param, + debug_branch_param, + debug_path_param, + debug_force_param, + debug_verbose_param, + debug_output_param, + debug_dry_run_param, + debug_timeout_param, + debug_config_created, + debug_action_created, + debug_timeout_wrapper_created, + debug_executing_with_timeout, + debug_timeout_completed, + debug_timeout_error, + debug_executing_dry_run, + debug_dry_run_completed, + debug_clone_operation_result, + debug_clone_operation_failed, + debug_clone_operation_completed, + debug_exception_caught, + debug_exception_details, +) + +config = Config() +nixopus_config_dir = config.get_yaml_value(NIXOPUS_CONFIG_DIR) +repo = config.get_yaml_value(DEFAULT_REPO) +branch = config.get_yaml_value(DEFAULT_BRANCH) +path = nixopus_config_dir + "/" + config.get_yaml_value(DEFAULT_PATH) + +clone_app = typer.Typer(help="Clone a repository", invoke_without_command=True) + +@clone_app.callback() +def clone_callback( + repo: str = typer.Option(repo, "--repo", "-r", help="The repository to clone"), + branch: str = typer.Option(branch, "--branch", "-b", help="The branch to clone"), + path: str = typer.Option(path, "--path", "-p", help="The path to clone the repository to"), + force: bool = typer.Option(False, "--force", "-f", help="Force the clone"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Clone a repository""" + try: + logger = Logger(verbose=verbose) + logger.debug(debug_clone_command_invoked) + logger.debug(debug_repo_param.format(repo=repo)) + logger.debug(debug_branch_param.format(branch=branch)) + logger.debug(debug_path_param.format(path=path)) + logger.debug(debug_force_param.format(force=force)) + logger.debug(debug_verbose_param.format(verbose=verbose)) + logger.debug(debug_output_param.format(output=output)) + logger.debug(debug_dry_run_param.format(dry_run=dry_run)) + logger.debug(debug_timeout_param.format(timeout=timeout)) + + config = CloneConfig(repo=repo, branch=branch, path=path, force=force, verbose=verbose, output=output, dry_run=dry_run) + logger.debug(debug_config_created.format(config_type="CloneConfig")) + + clone_operation = Clone(logger=logger) + logger.debug(debug_action_created.format(action_type="Clone")) + + logger.debug(debug_timeout_wrapper_created.format(timeout=timeout)) + logger.debug(debug_executing_with_timeout.format(timeout=timeout)) + + with TimeoutWrapper(timeout): + if config.dry_run: + logger.debug(debug_executing_dry_run) + formatted_output = clone_operation.clone_and_format(config) + logger.success(formatted_output) + logger.debug(debug_dry_run_completed) + else: + result = clone_operation.clone(config) + logger.debug(debug_clone_operation_result.format(success=result.success)) + + if not result.success: + 
logger.error(result.output) + logger.debug(debug_clone_operation_failed) + raise typer.Exit(1) + + logger.debug(debug_clone_operation_completed) + logger.success(result.output) + + logger.debug(debug_timeout_completed) + + except TimeoutError as e: + logger.debug(debug_timeout_error.format(error=str(e))) + if not isinstance(e, typer.Exit): + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + logger.debug(debug_exception_caught.format(error_type=type(e).__name__, error=str(e))) + logger.debug(debug_exception_details.format(error=e)) + if not isinstance(e, typer.Exit): + logger.error(str(e)) + raise typer.Exit(1) diff --git a/cli/app/commands/clone/messages.py b/cli/app/commands/clone/messages.py new file mode 100644 index 00000000..b4c6627d --- /dev/null +++ b/cli/app/commands/clone/messages.py @@ -0,0 +1,51 @@ +debug_cloning_repo = "Cloning {repo} to {path} (force: {force})" +debug_executing_git_clone = "Executing git clone: {command}" +debug_git_clone_success = "Git clone completed successfully" +debug_git_clone_failed = "Git clone failed (code: {code}): {error}" +debug_unexpected_error = "Unexpected error: {error_type}: {error}" +debug_removing_directory = "Removing existing directory: {path}" +debug_directory_removal_failed = "Failed to remove existing directory" +debug_path_exists_force_disabled = "Path exists and force disabled: {path}" +debug_clone_completed = "Clone completed in {duration}s - success: {success}" +debug_clone_command_invoked = "Clone command invoked with parameters:" +debug_repo_param = " repo: {repo}" +debug_branch_param = " branch: {branch}" +debug_path_param = " path: {path}" +debug_force_param = " force: {force}" +debug_verbose_param = " verbose: {verbose}" +debug_output_param = " output: {output}" +debug_dry_run_param = " dry_run: {dry_run}" +debug_executing_dry_run = "Executing dry run mode" +debug_dry_run_completed = "Dry run completed successfully" +debug_clone_operation_result = "Clone operation result - success: {success}" +debug_clone_operation_failed = "Clone operation failed, raising exit" +debug_clone_operation_completed = "Clone operation completed successfully" +debug_exception_caught = "Exception caught in clone callback: {error_type}: {error}" +debug_exception_details = "Exception details: {error}" +debug_timeout_param = " timeout: {timeout}" +debug_config_created = "Created {config_type} with parameters" +debug_action_created = "Created {action_type} action instance" +debug_timeout_wrapper_created = "TimeoutWrapper created with {timeout}s timeout" +debug_executing_with_timeout = "Executing clone operation with {timeout}s timeout" +debug_timeout_completed = "Timeout wrapper completed successfully" +debug_timeout_error = "Timeout error occurred: {error}" +path_already_exists_use_force = "Path {path} already exists. Use --force to overwrite." 
+prerequisites_validation_failed = "Prerequisites validation failed" +failed_to_prepare_target_directory = "Failed to prepare target directory" +invalid_repo = "Invalid repository format" +invalid_repository_url = "Invalid repository URL format" +invalid_path = "Invalid path format" +unknown_error = "Unknown error" +successfully_cloned = "Successfully cloned {repo} to {path}" +dry_run_mode = "=== DRY RUN MODE ===" +dry_run_command_would_be_executed = "The following command would be executed:" +dry_run_command = "Command: {command}" +dry_run_repository = "Repository: {repo}" +dry_run_branch = "Branch: {branch}" +dry_run_target_path = "Target path: {path}" +dry_run_force_mode = "Force mode: {force}" +path_exists_will_overwrite = "Path {path} exists and will be overwritten (force mode)" +path_exists_would_fail = "Path {path} exists - clone would fail without --force" +target_path_not_exists = "Target path {path} does not exist" +end_dry_run = "=== END DRY RUN ===" +default_branch = "default" \ No newline at end of file diff --git a/cli/app/commands/conf/__init__.py b/cli/app/commands/conf/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/app/commands/conf/base.py b/cli/app/commands/conf/base.py new file mode 100644 index 00000000..9e4755c9 --- /dev/null +++ b/cli/app/commands/conf/base.py @@ -0,0 +1,230 @@ +import os +import shutil +import tempfile +from typing import Dict, Generic, Optional, Protocol, TypeVar + +from pydantic import BaseModel, Field, field_validator + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol +from app.utils.config import Config, API_ENV_FILE, VIEW_ENV_FILE + +from .messages import ( + backup_created, + backup_creation_failed, + backup_file_not_found, + backup_remove_failed, + backup_removed, + backup_restore_attempt, + backup_restore_failed, + backup_restore_success, + file_not_exists, + file_not_found, + file_read_failed, + file_write_failed, + invalid_line_warning, + invalid_service, + read_error, + read_success, + reading_env_file, +) + +TConfig = TypeVar("TConfig", bound=BaseModel) +TResult = TypeVar("TResult", bound=BaseModel) + + +class EnvironmentServiceProtocol(Protocol): + def list_config(self, service: str, env_file: str = None) -> tuple[bool, Dict[str, str], str]: ... + + def set_config(self, service: str, key: str, value: str, env_file: str = None) -> tuple[bool, str]: ... + + def delete_config(self, service: str, key: str, env_file: str = None) -> tuple[bool, str]: ... 
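`EnvironmentServiceProtocol` is what the `conf` subcommands program against; the concrete file handling lives in `BaseEnvironmentManager`, defined next. It treats each service's env file as a flat KEY=VALUE store: comments and blank lines are skipped on read, and writes go through a temp file plus `os.replace`, with a `.backup` copy for rollback. A rough round-trip sketch (hypothetical, assuming the import path; the `PORT` key is made up for illustration):

```python
# Hypothetical round-trip sketch, illustrative only; not part of this changeset.
from app.commands.conf.base import BaseEnvironmentManager  # import path assumed
from app.utils.logger import Logger

mgr = BaseEnvironmentManager(Logger(verbose=True))
env_path = mgr.get_service_env_file("api")       # resolve the default api .env path from config
ok, env, err = mgr.read_env_file(env_path)       # dict of KEY=VALUE pairs, or an error message
if ok:
    env["PORT"] = "8443"                         # "PORT" is a made-up key for illustration
    ok, err = mgr.write_env_file(env_path, env)  # atomic write with .backup rollback
```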
+ + +class BaseEnvironmentManager: + def __init__(self, logger: LoggerProtocol): + self.logger = logger + + def read_env_file(self, file_path: str) -> tuple[bool, Dict[str, str], Optional[str]]: + self.logger.debug(reading_env_file.format(file_path=file_path)) + try: + if not os.path.exists(file_path): + self.logger.debug(file_not_exists.format(file_path=file_path)) + return False, {}, file_not_found.format(path=file_path) + + config = {} + with open(file_path, "r") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + if not line or line.startswith("#"): + continue + + if "=" not in line: + self.logger.warning(invalid_line_warning.format(line_num=line_num, file_path=file_path, line=line)) + continue + + key, value = line.split("=", 1) + config[key.strip()] = value.strip() + + self.logger.debug(read_success.format(count=len(config), file_path=file_path)) + return True, config, None + except Exception as e: + self.logger.debug(read_error.format(file_path=file_path, error=e)) + return False, {}, file_read_failed.format(error=e) + + def _create_backup(self, file_path: str) -> tuple[bool, Optional[str], Optional[str]]: + if not os.path.exists(file_path): + return True, None, None + + try: + backup_path = f"{file_path}.backup" + shutil.copy2(file_path, backup_path) + return True, backup_path, None + except Exception as e: + return False, None, backup_creation_failed.format(error=e) + + def _restore_backup(self, backup_path: str, file_path: str) -> tuple[bool, Optional[str]]: + try: + if os.path.exists(backup_path): + shutil.copy2(backup_path, file_path) + os.remove(backup_path) + return True, None + return False, backup_file_not_found.format(path=backup_path) + except Exception as e: + return False, backup_restore_failed.format(error=e) + + def _atomic_write(self, file_path: str, config: Dict[str, str]) -> tuple[bool, Optional[str]]: + temp_path = None + try: + os.makedirs(os.path.dirname(file_path), exist_ok=True) + + with tempfile.NamedTemporaryFile(mode="w", delete=False, dir=os.path.dirname(file_path)) as temp_file: + for key, value in sorted(config.items()): + temp_file.write(f"{key}={value}\n") + temp_file.flush() + try: + os.fsync(temp_file.fileno()) + except (OSError, AttributeError): + pass + temp_path = temp_file.name + + os.replace(temp_path, file_path) + return True, None + except Exception as e: + if temp_path and os.path.exists(temp_path): + try: + os.unlink(temp_path) + except: + pass + return False, file_write_failed.format(error=e) + + def write_env_file(self, file_path: str, config: Dict[str, str]) -> tuple[bool, Optional[str]]: + backup_created_flag = False + backup_path = None + + try: + success, backup_path, error = self._create_backup(file_path) + if not success: + return False, error + + backup_created_flag = True + self.logger.debug(backup_created.format(backup_path=backup_path)) + + success, error = self._atomic_write(file_path, config) + if not success: + if backup_created_flag and backup_path: + self.logger.warning(backup_restore_attempt) + restore_success, restore_error = self._restore_backup(backup_path, file_path) + if restore_success: + self.logger.debug(backup_restore_success) + else: + self.logger.error(backup_restore_failed.format(error=restore_error)) + return False, error + + if backup_created_flag and backup_path and os.path.exists(backup_path): + try: + os.remove(backup_path) + self.logger.debug(backup_removed) + except Exception as e: + self.logger.warning(backup_remove_failed.format(error=e)) + + return True, None + + except Exception as 
e: + return False, file_write_failed.format(error=e) + + def get_service_env_file(self, service: str, env_file: Optional[str] = None) -> str: + if env_file: + return env_file + + config = Config() + if service == "api": + default_path = config.get_yaml_value(API_ENV_FILE) + return default_path + elif service == "view": + default_path = config.get_yaml_value(VIEW_ENV_FILE) + return default_path + else: + raise ValueError(invalid_service.format(service=service)) + + +class BaseConfig(BaseModel): + service: str = Field("api", description="The name of the service to manage configuration for") + key: Optional[str] = Field(None, description="The configuration key") + value: Optional[str] = Field(None, description="The configuration value") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format: text, json") + dry_run: bool = Field(False, description="Dry run mode") + env_file: Optional[str] = Field(None, description="Path to the environment file") + + @field_validator("env_file") + @classmethod + def validate_env_file(cls, env_file: str) -> Optional[str]: + if not env_file: + return None + stripped_env_file = env_file.strip() + if not stripped_env_file: + return None + if not os.path.exists(stripped_env_file): + raise ValueError(file_not_found.format(path=stripped_env_file)) + return stripped_env_file + + +class BaseResult(BaseModel): + service: str + key: Optional[str] = None + value: Optional[str] = None + config: Dict[str, str] = Field(default_factory=dict) + verbose: bool + output: str + success: bool = False + error: Optional[str] = None + + +class BaseService(Generic[TConfig, TResult]): + def __init__(self, config: TConfig, logger: LoggerProtocol = None, environment_service: EnvironmentServiceProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.environment_service = environment_service + self.formatter = None + + def _create_result(self, success: bool, error: str = None, config_dict: Dict[str, str] = None) -> TResult: + raise NotImplementedError + + def execute(self) -> TResult: + raise NotImplementedError + + def execute_and_format(self) -> str: + raise NotImplementedError + + +class BaseAction(Generic[TConfig, TResult]): + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger + self.formatter = None + + def execute(self, config: TConfig) -> TResult: + raise NotImplementedError + + def format_output(self, result: TResult, output: str) -> str: + raise NotImplementedError diff --git a/cli/app/commands/conf/command.py b/cli/app/commands/conf/command.py new file mode 100644 index 00000000..b62ea039 --- /dev/null +++ b/cli/app/commands/conf/command.py @@ -0,0 +1,247 @@ +import typer + +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper + +from .delete import Delete, DeleteConfig +from .list import List, ListConfig +from .messages import ( + argument_must_be_in_form, + debug_conf_command_invoked, + debug_service_param, + debug_key_param, + debug_value_param, + debug_verbose_param, + debug_output_param, + debug_dry_run_param, + debug_env_file_param, + debug_timeout_param, + debug_executing_dry_run, + debug_dry_run_completed, + debug_conf_operation_result, + debug_conf_operation_failed, + debug_conf_operation_completed, + debug_exception_caught, + debug_exception_details, + debug_parsing_key_value, + debug_key_value_parsed, + debug_key_value_parse_failed, + debug_config_created, + debug_action_created, + 
debug_timeout_wrapper_created, + debug_executing_with_timeout, + debug_timeout_completed, + debug_timeout_error, +) +from .set import Set, SetConfig + +conf_app = typer.Typer(help="Manage configuration") + + +@conf_app.command() +def list( + service: str = typer.Option( + "api", "--service", "-s", help="The name of the service to list configuration for, e.g api,view" + ), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """List all configuration""" + try: + logger = Logger(verbose=verbose) + + logger.debug(debug_conf_command_invoked) + logger.debug(debug_service_param.format(service=service)) + logger.debug(debug_verbose_param.format(verbose=verbose)) + logger.debug(debug_output_param.format(output=output)) + logger.debug(debug_dry_run_param.format(dry_run=dry_run)) + logger.debug(debug_env_file_param.format(env_file=env_file)) + logger.debug(debug_timeout_param.format(timeout=timeout)) + + config = ListConfig(service=service, verbose=verbose, output=output, dry_run=dry_run, env_file=env_file) + logger.debug(debug_config_created.format(config_type="ListConfig")) + + list_action = List(logger=logger) + logger.debug(debug_action_created.format(action_type="List")) + + logger.debug(debug_timeout_wrapper_created.format(timeout=timeout)) + logger.debug(debug_executing_with_timeout.format(timeout=timeout)) + + with TimeoutWrapper(timeout): + if config.dry_run: + logger.debug(debug_executing_dry_run) + formatted_output = list_action.list_and_format(config) + logger.info(formatted_output) + logger.debug(debug_dry_run_completed) + else: + result = list_action.list(config) + logger.debug(debug_conf_operation_result.format(success=result.success)) + + if result.success: + formatted_output = list_action.format_output(result, output) + logger.success(formatted_output) + logger.debug(debug_conf_operation_completed) + else: + logger.error(result.error) + logger.debug(debug_conf_operation_failed) + raise typer.Exit(1) + + logger.debug(debug_timeout_completed) + + except TimeoutError as e: + logger.debug(debug_timeout_error.format(error=str(e))) + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + logger.debug(debug_exception_caught.format(error_type=type(e).__name__, error=str(e))) + logger.debug(debug_exception_details.format(error=e)) + if not isinstance(e, typer.Exit): + logger.error(str(e)) + raise typer.Exit(1) + + +@conf_app.command() +def delete( + service: str = typer.Option( + "api", "--service", "-s", help="The name of the service to delete configuration for, e.g api,view" + ), + key: str = typer.Argument(..., help="The key of the configuration to delete"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Delete a configuration""" + try: + logger = Logger(verbose=verbose) + + logger.debug(debug_conf_command_invoked) + 
logger.debug(debug_service_param.format(service=service)) + logger.debug(debug_key_param.format(key=key)) + logger.debug(debug_verbose_param.format(verbose=verbose)) + logger.debug(debug_output_param.format(output=output)) + logger.debug(debug_dry_run_param.format(dry_run=dry_run)) + logger.debug(debug_env_file_param.format(env_file=env_file)) + logger.debug(debug_timeout_param.format(timeout=timeout)) + + config = DeleteConfig(service=service, key=key, verbose=verbose, output=output, dry_run=dry_run, env_file=env_file) + logger.debug(debug_config_created.format(config_type="DeleteConfig")) + + delete_action = Delete(logger=logger) + logger.debug(debug_action_created.format(action_type="Delete")) + + logger.debug(debug_timeout_wrapper_created.format(timeout=timeout)) + logger.debug(debug_executing_with_timeout.format(timeout=timeout)) + + with TimeoutWrapper(timeout): + if config.dry_run: + logger.debug(debug_executing_dry_run) + formatted_output = delete_action.delete_and_format(config) + logger.info(formatted_output) + logger.debug(debug_dry_run_completed) + else: + result = delete_action.delete(config) + logger.debug(debug_conf_operation_result.format(success=result.success)) + + if result.success: + formatted_output = delete_action.format_output(result, output) + logger.success(formatted_output) + logger.debug(debug_conf_operation_completed) + else: + logger.error(result.error) + logger.debug(debug_conf_operation_failed) + raise typer.Exit(1) + + logger.debug(debug_timeout_completed) + + except TimeoutError as e: + logger.debug(debug_timeout_error.format(error=str(e))) + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + logger.debug(debug_exception_caught.format(error_type=type(e).__name__, error=str(e))) + logger.debug(debug_exception_details.format(error=e)) + if not isinstance(e, typer.Exit): + logger.error(str(e)) + raise typer.Exit(1) + + +@conf_app.command() +def set( + service: str = typer.Option( + "api", "--service", "-s", help="The name of the service to set configuration for, e.g api,view" + ), + key_value: str = typer.Argument(..., help="Configuration in the form KEY=VALUE"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Set a configuration""" + try: + logger = Logger(verbose=verbose) + + logger.debug(debug_conf_command_invoked) + logger.debug(debug_service_param.format(service=service)) + logger.debug(debug_verbose_param.format(verbose=verbose)) + logger.debug(debug_output_param.format(output=output)) + logger.debug(debug_dry_run_param.format(dry_run=dry_run)) + logger.debug(debug_env_file_param.format(env_file=env_file)) + logger.debug(debug_timeout_param.format(timeout=timeout)) + logger.debug(debug_parsing_key_value.format(key_value=key_value)) + + if "=" not in key_value: + logger.debug(debug_key_value_parse_failed.format(key_value=key_value)) + logger.error(argument_must_be_in_form) + raise typer.Exit(1) + + key, value = key_value.split("=", 1) + logger.debug(debug_key_value_parsed.format(key=key, value=value)) + + config = SetConfig( + service=service, key=key, value=value, verbose=verbose, output=output, dry_run=dry_run, env_file=env_file + ) + 
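+        # Note: key_value is split on the first '=' only, so values that themselves
+        # contain '=' (e.g. "DB_URL=postgres://host/db?sslmode=require") are kept intact.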
logger.debug(debug_config_created.format(config_type="SetConfig")) + + set_action = Set(logger=logger) + logger.debug(debug_action_created.format(action_type="Set")) + + logger.debug(debug_timeout_wrapper_created.format(timeout=timeout)) + logger.debug(debug_executing_with_timeout.format(timeout=timeout)) + + with TimeoutWrapper(timeout): + if config.dry_run: + logger.debug(debug_executing_dry_run) + formatted_output = set_action.set_and_format(config) + logger.info(formatted_output) + logger.debug(debug_dry_run_completed) + else: + result = set_action.set(config) + logger.debug(debug_conf_operation_result.format(success=result.success)) + + if result.success: + formatted_output = set_action.format_output(result, output) + logger.success(formatted_output) + logger.debug(debug_conf_operation_completed) + else: + logger.error(result.error) + logger.debug(debug_conf_operation_failed) + raise typer.Exit(1) + + logger.debug(debug_timeout_completed) + + except TimeoutError as e: + logger.debug(debug_timeout_error.format(error=str(e))) + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + logger.debug(debug_exception_caught.format(error_type=type(e).__name__, error=str(e))) + logger.debug(debug_exception_details.format(error=e)) + if not isinstance(e, typer.Exit): + logger.error(str(e)) + raise typer.Exit(1) diff --git a/cli/app/commands/conf/delete.py b/cli/app/commands/conf/delete.py new file mode 100644 index 00000000..e31db571 --- /dev/null +++ b/cli/app/commands/conf/delete.py @@ -0,0 +1,173 @@ +import os +from typing import Dict, Optional, Protocol + +from pydantic import BaseModel, Field + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseEnvironmentManager, BaseResult, BaseService +from .messages import ( + config_key_not_found, + configuration_delete_failed, + configuration_deleted, + dry_run_delete_config, + dry_run_mode, + end_dry_run, + key_required_delete, + debug_deleting_config_key, + debug_config_key_deleted, + debug_config_key_not_found_delete, + debug_service_env_file_resolved, + debug_config_file_exists, + debug_config_file_not_exists, + debug_config_file_read_success, + debug_config_file_read_failed, + debug_config_file_write_failed, + debug_dry_run_simulation, + debug_dry_run_simulation_complete, + debug_validation_failed, +) + + +class EnvironmentServiceProtocol(Protocol): + def delete_config(self, service: str, key: str, env_file: str = None) -> tuple[bool, str]: ... 
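+
+# The Protocol above lets callers (tests in particular) inject a substitute
+# environment service; when none is supplied, DeleteService falls back to the
+# EnvironmentManager defined below. A minimal usage sketch, assuming the default
+# env file resolved from the CLI config (the key name here is illustrative):
+#
+#   config = DeleteConfig(service="api", key="SOME_KEY")
+#   result = Delete().execute(config)
+#   if not result.success:
+#       print(result.error)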
+ + +class EnvironmentManager(BaseEnvironmentManager): + def delete_config(self, service: str, key: str, env_file: Optional[str] = None) -> tuple[bool, Optional[str]]: + file_path = self.get_service_env_file(service, env_file) + self.logger.debug(debug_service_env_file_resolved.format(file_path=file_path)) + + if self.logger.verbose: + if os.path.exists(file_path): + self.logger.debug(debug_config_file_exists.format(file_path=file_path)) + else: + self.logger.debug(debug_config_file_not_exists.format(file_path=file_path)) + + success, config, error = self.read_env_file(file_path) + if not success: + self.logger.debug(debug_config_file_read_failed.format(error=error)) + return False, error + + self.logger.debug(debug_config_file_read_success.format(count=len(config))) + + if key not in config: + self.logger.debug(debug_config_key_not_found_delete.format(key=key)) + return False, config_key_not_found.format(key=key) + + self.logger.debug(debug_deleting_config_key.format(key=key)) + del config[key] + + success, error = self.write_env_file(file_path, config) + + if success: + self.logger.debug(debug_config_key_deleted.format(key=key)) + else: + self.logger.debug(debug_config_file_write_failed.format(error=error)) + + return success, error + + +class DeleteResult(BaseResult): + pass + + +class DeleteConfig(BaseConfig): + key: str = Field(..., description="The key of the configuration to delete") + + +class DeleteService(BaseService[DeleteConfig, DeleteResult]): + def __init__( + self, config: DeleteConfig, logger: LoggerProtocol = None, environment_service: EnvironmentServiceProtocol = None + ): + super().__init__(config, logger, environment_service) + self.environment_service = environment_service or EnvironmentManager(self.logger) + + def _create_result(self, success: bool, error: str = None, config_dict: Dict[str, str] = None) -> DeleteResult: + return DeleteResult( + service=self.config.service, + key=self.config.key, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + config=config_dict or {}, + ) + + def delete(self) -> DeleteResult: + return self.execute() + + def execute(self) -> DeleteResult: + if not self.config.key: + self.logger.debug(debug_validation_failed.format(error="Key is required")) + return self._create_result(False, error=key_required_delete) + + if self.config.dry_run: + self.logger.debug(debug_dry_run_simulation) + result = self._create_result(True) + self.logger.debug(debug_dry_run_simulation_complete) + return result + + success, error = self.environment_service.delete_config(self.config.service, self.config.key, self.config.env_file) + + if success: + return self._create_result(True) + else: + return self._create_result(False, error=error) + + def delete_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self._format_dry_run() + + result = self.execute() + return self._format_output(result, self.config.output) + + def _format_dry_run(self) -> str: + lines = [dry_run_mode] + lines.append(dry_run_delete_config.format(service=self.config.service, key=self.config.key)) + lines.append(end_dry_run) + return "\n".join(lines) + + def _format_output(self, result: DeleteResult, output_format: str) -> str: + if output_format == "json": + formatted = self._format_json(result) + else: + formatted = self._format_text(result) + + return formatted + + def _format_json(self, result: DeleteResult) -> str: + import json + + output = {"service": result.service, 
"key": result.key, "success": result.success, "error": result.error} + return json.dumps(output, indent=2) + + def _format_text(self, result: DeleteResult) -> str: + if not result.success: + return configuration_delete_failed.format(service=result.service, error=result.error) + + return configuration_deleted.format(service=result.service, key=result.key) + + +class Delete(BaseAction[DeleteConfig, DeleteResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + + def delete(self, config: DeleteConfig) -> DeleteResult: + return self.execute(config) + + def execute(self, config: DeleteConfig) -> DeleteResult: + service = DeleteService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: DeleteResult, output: str) -> str: + service = DeleteService(result, logger=self.logger) + return service._format_output(result, output) + + def delete_and_format(self, config: DeleteConfig) -> str: + service = DeleteService(config, logger=self.logger) + return service.execute_and_format() diff --git a/cli/app/commands/conf/list.py b/cli/app/commands/conf/list.py new file mode 100644 index 00000000..af1928a0 --- /dev/null +++ b/cli/app/commands/conf/list.py @@ -0,0 +1,175 @@ +import os +from typing import Dict, Optional, Protocol + +from pydantic import BaseModel, Field + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol +from app.utils.output_formatter import OutputFormatter + +from .base import BaseAction, BaseConfig, BaseEnvironmentManager, BaseResult, BaseService +from .messages import ( + configuration_list_failed, + configuration_listed, + dry_run_list_config, + dry_run_mode, + end_dry_run, + no_configuration_found, + debug_listing_config, + debug_config_listed, + debug_no_config_to_list, + debug_service_env_file_resolved, + debug_config_file_exists, + debug_config_file_not_exists, + debug_config_file_read_success, + debug_config_file_read_failed, + debug_dry_run_simulation, + debug_dry_run_simulation_complete, + configuration_list_title, +) + + +class EnvironmentServiceProtocol(Protocol): + def list_config(self, service: str, env_file: str = None) -> tuple[bool, Dict[str, str], str]: ... 
+ + +class EnvironmentManager(BaseEnvironmentManager): + def list_config(self, service: str, env_file: Optional[str] = None) -> tuple[bool, Dict[str, str], Optional[str]]: + file_path = self.get_service_env_file(service, env_file) + self.logger.debug(debug_service_env_file_resolved.format(file_path=file_path)) + + if self.logger.verbose: + if os.path.exists(file_path): + self.logger.debug(debug_config_file_exists.format(file_path=file_path)) + else: + self.logger.debug(debug_config_file_not_exists.format(file_path=file_path)) + + success, config_dict, error = self.read_env_file(file_path) + + if success: + self.logger.debug(debug_config_file_read_success.format(count=len(config_dict))) + else: + self.logger.debug(debug_config_file_read_failed.format(error=error)) + + return success, config_dict, error + + +class ListResult(BaseResult): + pass + + +class ListConfig(BaseConfig): + pass + + +class ListService(BaseService[ListConfig, ListResult]): + def __init__( + self, config: ListConfig, logger: LoggerProtocol = None, environment_service: EnvironmentServiceProtocol = None + ): + super().__init__(config, logger, environment_service) + self.environment_service = environment_service or EnvironmentManager(self.logger) + self.formatter = OutputFormatter() + + def _create_result(self, success: bool, error: str = None, config_dict: Dict[str, str] = None) -> ListResult: + return ListResult( + service=self.config.service, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + config=config_dict or {}, + ) + + def list(self) -> ListResult: + return self.execute() + + def execute(self) -> ListResult: + self.logger.debug(debug_listing_config.format(service=self.config.service)) + + if self.config.dry_run: + self.logger.debug(debug_dry_run_simulation) + result = self._create_result(True) + self.logger.debug(debug_dry_run_simulation_complete) + return result + + success, config_dict, error = self.environment_service.list_config(self.config.service, self.config.env_file) + + if success: + if config_dict: + self.logger.debug(debug_config_listed.format(count=len(config_dict))) + else: + self.logger.debug(debug_no_config_to_list) + self.logger.info(configuration_listed.format(service=self.config.service)) + return self._create_result(True, config_dict=config_dict) + else: + self.logger.error(configuration_list_failed.format(service=self.config.service, error=error)) + return self._create_result(False, error=error) + + def list_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self._format_dry_run() + + result = self.execute() + return self._format_output(result, self.config.output) + + def _format_dry_run(self) -> str: + lines = [dry_run_mode] + lines.append(dry_run_list_config.format(service=self.config.service)) + lines.append(end_dry_run) + return "\n".join(lines) + + def _format_output(self, result: ListResult, output_format: str) -> str: + if output_format == "json": + formatted = self._format_json(result) + else: + formatted = self._format_text(result, output_format) + + return formatted + + def _format_json(self, result: ListResult) -> str: + import json + + output = {"service": result.service, "success": result.success, "error": result.error, "config": result.config} + return json.dumps(output, indent=2) + + def _format_text(self, result: ListResult, output_format: str) -> str: + if not result.success: + return configuration_list_failed.format(service=result.service, 
error=result.error) + + if result.config: + success_message = configuration_listed.format(service=result.service) + title = configuration_list_title.format(service=result.service) + headers = ("Key", "Value") + + return self.formatter.format_table_output( + data=result.config, + output_format=output_format, + success_message=success_message, + title=title, + headers=headers + ) + else: + return no_configuration_found.format(service=result.service) + + +class List(BaseAction[ListConfig, ListResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + + def list(self, config: ListConfig) -> ListResult: + return self.execute(config) + + def execute(self, config: ListConfig) -> ListResult: + service = ListService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: ListResult, output: str) -> str: + service = ListService(result, logger=self.logger) + return service._format_output(result, output) + + def list_and_format(self, config: ListConfig) -> str: + service = ListService(config, logger=self.logger) + return service.execute_and_format() diff --git a/cli/app/commands/conf/messages.py b/cli/app/commands/conf/messages.py new file mode 100644 index 00000000..dc36905b --- /dev/null +++ b/cli/app/commands/conf/messages.py @@ -0,0 +1,78 @@ +configuration_listed = "Configuration listed successfully for service: {service}" +configuration_list_failed = "Failed to list configuration for service: {service}: {error}" +no_configuration_found = "No configuration found for service: {service}" +configuration_set = "Configuration set successfully: {key}={value} for service: {service}" +configuration_set_failed = "Failed to set configuration for service: {service}: {error}" +key_required = "Key is required for set operation" +value_required = "Value is required for set operation" +configuration_deleted = "Configuration deleted successfully: {key} for service: {service}" +configuration_delete_failed = "Failed to delete configuration for service: {service}: {error}" +key_not_found = "Configuration key '{key}' not found for service: {service}" +key_required_delete = "Key is required for delete operation" +dry_run_mode = "DRY RUN MODE - No changes will be made" +dry_run_list_config = "Would list configuration for service: {service}" +dry_run_set_config = "Would set configuration: {key}={value} for service: {service}" +dry_run_delete_config = "Would delete configuration: {key} for service: {service}" +end_dry_run = "DRY RUN COMPLETE" +file_read_failed = "Failed to read environment file: {error}" +file_write_failed = "Failed to write environment file: {error}" +file_not_found = "Environment file not found: {path}" +invalid_line_warning = "Invalid line {line_num} in {file_path}: {line}" +backup_created = "Backup created: {backup_path}" +backup_removed = "Backup removed after successful write" +backup_remove_failed = "Failed to remove backup: {error}" +backup_restore_attempt = "Attempting to restore from backup due to error" +backup_restore_success = "Successfully restored from backup" +backup_restore_failed = "Failed to restore from backup: {error}" +backup_creation_failed = "Failed to create backup: {error}" +invalid_service = "Invalid service: {service}" +config_key_not_found = "Configuration key '{key}' not found" +backup_file_not_found = "Backup file not found" +reading_env_file = "Reading environment file: {file_path}" +file_not_exists = "File does not exist: {file_path}" +read_success = "Successfully read {count} configuration entries from 
{file_path}" +read_error = "Error reading file {file_path}: {error}" +argument_must_be_in_form = "Argument must be in the form KEY=VALUE" +debug_conf_command_invoked = "Configuration command invoked with parameters:" +debug_service_param = " service: {service}" +debug_key_param = " key: {key}" +debug_value_param = " value: {value}" +debug_verbose_param = " verbose: {verbose}" +debug_output_param = " output: {output}" +debug_dry_run_param = " dry_run: {dry_run}" +debug_env_file_param = " env_file: {env_file}" +debug_timeout_param = " timeout: {timeout}" +debug_executing_dry_run = "Executing dry run mode" +debug_dry_run_completed = "Dry run completed successfully" +debug_conf_operation_result = "Configuration operation result - success: {success}" +debug_conf_operation_failed = "Configuration operation failed, raising exit" +debug_conf_operation_completed = "Configuration operation completed successfully" +debug_exception_caught = "Exception caught in configuration callback: {error_type}: {error}" +debug_exception_details = "Exception details: {error}" +debug_parsing_key_value = "Parsing key-value argument: {key_value}" +debug_key_value_parsed = "Key-value parsed - key: {key}, value: {value}" +debug_key_value_parse_failed = "Key-value parsing failed: {key_value}" +debug_config_created = "Configuration object created: {config_type}" +debug_action_created = "Action object created: {action_type}" +debug_timeout_wrapper_created = "Timeout wrapper created with timeout: {timeout}s" +debug_executing_with_timeout = "Executing operation with timeout: {timeout}s" +debug_timeout_completed = "Timeout wrapper completed successfully" +debug_timeout_error = "Timeout error occurred: {error}" +debug_validation_failed = "Configuration validation failed: {error}" +debug_service_env_file_resolved = "Service environment file resolved: {file_path}" +debug_config_file_exists = "Configuration file exists: {file_path}" +debug_config_file_not_exists = "Configuration file does not exist: {file_path}" +debug_config_file_read_success = "Configuration file read successfully: {count} entries" +debug_config_file_read_failed = "Configuration file read failed: {error}" +debug_updating_config = "Updating configuration: {key}={value}" +debug_config_updated = "Configuration updated successfully" +debug_config_file_write_failed = "Configuration file write failed: {error}" +debug_deleting_config_key = "Deleting configuration key: {key}" +debug_config_key_deleted = "Configuration key deleted successfully" +debug_config_key_not_found_delete = "Configuration key not found for deletion: {key}" +debug_listing_config = "Listing configuration for service: {service}" +debug_config_listed = "Configuration listed successfully: {count} entries" +debug_no_config_to_list = "No configuration entries to list" +debug_dry_run_simulation = "Simulating operation in dry run mode" +debug_dry_run_simulation_complete = "Dry run simulation completed" +configuration_list_title = "Configuration listed for {service}" diff --git a/cli/app/commands/conf/set.py b/cli/app/commands/conf/set.py new file mode 100644 index 00000000..16f5c8e3 --- /dev/null +++ b/cli/app/commands/conf/set.py @@ -0,0 +1,182 @@ +import os +from typing import Dict, Optional, Protocol + +from pydantic import BaseModel, Field + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseEnvironmentManager, BaseResult, BaseService +from .messages import ( + configuration_set, + configuration_set_failed, + 
dry_run_mode, + dry_run_set_config, + end_dry_run, + key_required, + value_required, + debug_updating_config, + debug_config_updated, + debug_service_env_file_resolved, + debug_config_file_exists, + debug_config_file_not_exists, + debug_config_file_read_success, + debug_config_file_read_failed, + debug_config_file_write_failed, + debug_dry_run_simulation, + debug_dry_run_simulation_complete, + debug_validation_failed, +) + + +class EnvironmentServiceProtocol(Protocol): + def set_config(self, service: str, key: str, value: str, env_file: str = None) -> tuple[bool, str]: ... + + +class EnvironmentManager(BaseEnvironmentManager): + def set_config(self, service: str, key: str, value: str, env_file: Optional[str] = None) -> tuple[bool, Optional[str]]: + file_path = self.get_service_env_file(service, env_file) + self.logger.debug(debug_service_env_file_resolved.format(file_path=file_path)) + + if self.logger.verbose: + if os.path.exists(file_path): + self.logger.debug(debug_config_file_exists.format(file_path=file_path)) + else: + self.logger.debug(debug_config_file_not_exists.format(file_path=file_path)) + + success, config, error = self.read_env_file(file_path) + if not success: + self.logger.debug(debug_config_file_read_failed.format(error=error)) + return False, error + + self.logger.debug(debug_config_file_read_success.format(count=len(config))) + + self.logger.debug(debug_updating_config.format(key=key, value=value)) + config[key] = value + + success, error = self.write_env_file(file_path, config) + + if success: + self.logger.debug(debug_config_updated) + else: + self.logger.debug(debug_config_file_write_failed.format(error=error)) + + return success, error + + +class SetResult(BaseResult): + pass + + +class SetConfig(BaseConfig): + key: str = Field(..., description="The key of the configuration to set") + value: str = Field(..., description="The value of the configuration to set") + + +class SetService(BaseService[SetConfig, SetResult]): + def __init__( + self, config: SetConfig, logger: LoggerProtocol = None, environment_service: EnvironmentServiceProtocol = None + ): + super().__init__(config, logger, environment_service) + self.environment_service = environment_service or EnvironmentManager(self.logger) + + def _create_result(self, success: bool, error: str = None, config_dict: Dict[str, str] = None) -> SetResult: + return SetResult( + service=self.config.service, + key=self.config.key, + value=self.config.value, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + config=config_dict or {}, + ) + + def set(self) -> SetResult: + return self.execute() + + def execute(self) -> SetResult: + if not self.config.key: + self.logger.debug(debug_validation_failed.format(error="Key is required")) + return self._create_result(False, error=key_required) + + if not self.config.value: + self.logger.debug(debug_validation_failed.format(error="Value is required")) + return self._create_result(False, error=value_required) + + if self.config.dry_run: + self.logger.debug(debug_dry_run_simulation) + result = self._create_result(True) + self.logger.debug(debug_dry_run_simulation_complete) + return result + + success, error = self.environment_service.set_config( + self.config.service, self.config.key, self.config.value, self.config.env_file + ) + + if success: + return self._create_result(True) + else: + return self._create_result(False, error=error) + + def set_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + 
if self.config.dry_run: + return self._format_dry_run() + + result = self.execute() + return self._format_output(result, self.config.output) + + def _format_dry_run(self) -> str: + lines = [dry_run_mode] + lines.append(dry_run_set_config.format(service=self.config.service, key=self.config.key, value=self.config.value)) + lines.append(end_dry_run) + return "\n".join(lines) + + def _format_output(self, result: SetResult, output_format: str) -> str: + if output_format == "json": + formatted = self._format_json(result) + else: + formatted = self._format_text(result) + + return formatted + + def _format_json(self, result: SetResult) -> str: + import json + + output = { + "service": result.service, + "key": result.key, + "value": result.value, + "success": result.success, + "error": result.error, + } + return json.dumps(output, indent=2) + + def _format_text(self, result: SetResult) -> str: + if not result.success: + return configuration_set_failed.format(service=result.service, error=result.error) + + return configuration_set.format(service=result.service, key=result.key, value=result.value) + + +class Set(BaseAction[SetConfig, SetResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + + def set(self, config: SetConfig) -> SetResult: + return self.execute(config) + + def execute(self, config: SetConfig) -> SetResult: + service = SetService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: SetResult, output: str) -> str: + service = SetService(result, logger=self.logger) + return service._format_output(result, output) + + def set_and_format(self, config: SetConfig) -> str: + service = SetService(config, logger=self.logger) + return service.execute_and_format() diff --git a/cli/app/commands/conflict/__init__.py b/cli/app/commands/conflict/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/app/commands/conflict/command.py b/cli/app/commands/conflict/command.py new file mode 100644 index 00000000..d6872f39 --- /dev/null +++ b/cli/app/commands/conflict/command.py @@ -0,0 +1,59 @@ +import typer +from .conflict import ConflictConfig, ConflictService +from .messages import ( + conflict_check_help, + error_checking_conflicts, + conflicts_found_warning, + no_conflicts_info, + checking_conflicts_info, +) +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper + +conflict_app = typer.Typer(help=conflict_check_help, no_args_is_help=False) + + +@conflict_app.callback(invoke_without_command=True) +def conflict_callback( + ctx: typer.Context, + config_file: str = typer.Option("helpers/config.prod.yaml", "--config-file", "-c", help="Path to configuration file"), + timeout: int = typer.Option(5, "--timeout", "-t", help="Timeout for tool checks in seconds"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format (text/json)"), +) -> None: + """Check for tool version conflicts""" + if ctx.invoked_subcommand is None: + # Initialize logger once and reuse throughout + logger = Logger(verbose=verbose) + + try: + logger.info(checking_conflicts_info) + + config = ConflictConfig( + config_file=config_file, + verbose=verbose, + output=output, + ) + + service = ConflictService(config, logger=logger) + + with TimeoutWrapper(timeout): + result = service.check_and_format(output) + # Check if there are any conflicts and exit with appropriate code + results = service.check_conflicts() + conflicts = [r 
for r in results if r.conflict] + + if conflicts: + logger.error(result) + logger.warning(conflicts_found_warning.format(count=len(conflicts))) + raise typer.Exit(1) + else: + logger.success(result) + logger.info(no_conflicts_info) + + except TimeoutError as e: + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + logger.error(error_checking_conflicts.format(error=str(e))) + raise typer.Exit(1) diff --git a/cli/app/commands/conflict/conflict.py b/cli/app/commands/conflict/conflict.py new file mode 100644 index 00000000..074dbad1 --- /dev/null +++ b/cli/app/commands/conflict/conflict.py @@ -0,0 +1,403 @@ +import os +import subprocess +import re +from typing import Dict, List, Optional, Any, Tuple +from packaging import version +from packaging.specifiers import SpecifierSet +from packaging.version import Version + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol +from app.utils.output_formatter import OutputFormatter +from app.utils.lib import ParallelProcessor +from app.utils.config import Config, DEPS +from .models import ConflictCheckResult, ConflictConfig +from .messages import * + + +class VersionParser: + + @staticmethod + def is_major_minor_format(requirement: str) -> bool: + """Check if the requirement is in major.minor format (e.g., '1.20').""" + return bool(re.match(r"^\d+\.\d+$", requirement)) + + @staticmethod + def _search_version(pattern: str, output: str, flags: int = re.IGNORECASE) -> Optional[str]: + """Helper to search for a version pattern and return group(1) if found.""" + if match := re.search(pattern, output, flags): + return match.group(1) + return None + + """Utility class for parsing and comparing versions.""" + + # Version pattern mappings for different tools + VERSION_PATTERNS = [ + r"version\s+(\d+\.\d+\.\d+)", # "version 1.20.3", "version 2.1.0" + r"v(\d+\.\d+\.\d+)", # "v1.20.3", "v2.1.0" + r"(\d+\.\d+\.\d+)", # "1.20.3", "2.1.0" (standalone) + r"Version\s+(\d+\.\d+\.\d+)", # "Version 1.20.3", "Version 2.1.0" + r"(\d+\.\d+)", # "1.20", "2.1" (major.minor only) + ] + + # Version operators for requirement specifications + VERSION_OPERATORS = [">=", "<=", ">", "<", "==", "!=", "~", "^"] + + # Supported version specification formats in config files + # Format: "description": "example" + # SUPPORTED_VERSION_FORMATS + # "exact_version": "1.20.3" + # "range_operators": ">=1.20.0, <2.0.0" + # "greater_than_equal": ">=1.20.0" + # "less_than": "<2.0.0" + # "compatible_range": "~=1.20.0" # Python-style compatible release + # "major_minor_only": "1.20" # Implies >=1.20.0, <1.21.0 + + @staticmethod + def parse_version_output(tool: str, output: str) -> Optional[str]: + """Parse version from tool output.""" + try: + # Common version patterns + for pattern in VersionParser.VERSION_PATTERNS: + if version := VersionParser._search_version(pattern, output): + return version + + # Tool-specific parsing for unique output formats + if tool == "go": + # "go version go1.20.3 darwin/amd64" -> "1.20.3" + if version := VersionParser._search_version(r"go(\d+\.\d+\.\d+)", output, 0): + return version + + elif tool == "curl": + # "curl 7.53.1 (x86_64-apple-darwin14.5.0)..." 
-> "7.53.1" + if version := VersionParser._search_version(r"curl\s+(\d+\.\d+\.\d+)", output, 0): + return version + + elif tool == "ssh" or tool == "open-ssh" or tool == "openssh-server": + # "OpenSSH_9.8p1, LibreSSL 3.3.6" -> "9.8.1" + if match := re.search(r"OpenSSH_(\d+\.\d+)(?:p(\d+))?", output): + major_minor = match.group(1) + patch = match.group(2) or "0" + return f"{major_minor}.{patch}" + + elif tool == "redis": + # "Redis server v=7.0.11 sha=00000000:0..." -> "7.0.11" + if version := VersionParser._search_version(r"v=(\d+\.\d+\.\d+)", output, 0): + return version + + elif tool == "postgresql" or tool == "psql": + # "psql (PostgreSQL) 14.9" -> "14.9" + if version := VersionParser._search_version(r"PostgreSQL\)\s+(\d+\.\d+)", output, 0): + return version + + elif tool == "air": + # Air might have specific format, keeping flexible for now + if version := VersionParser._search_version(r"(\d+\.\d+\.\d+)", output, 0): + return version + + return None + except Exception as e: + raise ValueError(error_parsing_version.format(tool=tool, error=str(e))) + + @staticmethod + def compare_versions(current: str, expected: str) -> bool: + """Compare version against requirement specification.""" + try: + # Handle simple version comparisons (backwards compatibility) + if not any(op in expected for op in VersionParser.VERSION_OPERATORS): + # Default to >= for simple version strings + return version.parse(current) >= version.parse(expected) + + # Handle version ranges and specifiers + spec_set = SpecifierSet(expected) + return Version(current) in spec_set + + except Exception: + # Fallback to string comparison + return current == expected + + @staticmethod + def normalize_version_requirement(requirement: str) -> str: + """ + Parse version requirement and return a normalized specifier. + """ + + if not requirement: + return requirement + + requirement = requirement.strip() + + # If it already contains operators, return as-is + if any(op in requirement for op in VersionParser.VERSION_OPERATORS): + return requirement + + # Handle major.minor format (e.g., "1.20" -> ">=1.20.0, <1.21.0") + if VersionParser.is_major_minor_format(requirement): + try: + parts = requirement.split(".") + major, minor = int(parts[0]), int(parts[1]) + return f">={requirement}.0, <{major}.{minor + 1}.0" + except (ValueError, IndexError): + return f">={requirement}" + + # Handle exact version format (e.g., "1.20.3" -> "==1.20.3") + if re.match(r"^\d+\.\d+\.\d+$", requirement): + return f"=={requirement}" + + # If none of the above, treat as exact match + return f"=={requirement}" + + @staticmethod + def validate_version_format(requirement: str) -> bool: + """ + Validate if the version requirement follows supported formats. + Returns True if the format is supported, False otherwise. 
+ """ + + if not requirement: + return True + + requirement = requirement.strip() + + # Check if it contains supported operators + if any(op in requirement for op in VersionParser.VERSION_OPERATORS): + return True + + # Check for major.minor format + if VersionParser.is_major_minor_format(requirement): + return True + + # Check for exact version format + if re.match(r"^\d+\.\d+\.\d+$", requirement): + return True + + # If none match, it's unsupported + return False + + +class ToolVersionChecker: + """Handles version checking for different tools.""" + + # Tool name mappings for command execution + TOOL_MAPPING = {"open-ssh": "ssh", "open-sshserver": "sshd", "python3-venv": "python3"} # TODO: @shravan20 Fix this issue + + def __init__(self, logger: LoggerProtocol, deps_config: Optional[Dict[str, Any]] = None, timeout: int = 10): + self.timeout = timeout # Default timeout for individual subprocess calls + self.logger = logger + self.deps_config = deps_config or {} + + def get_tool_version(self, tool: str) -> Optional[str]: + """Get version of a tool.""" + try: + # get version-command from deps config + cmd = None + if tool in self.deps_config: + tool_cfg = self.deps_config[tool] + cmd = tool_cfg.get("version-command") + # Fallback to default if not found + if not cmd: + cmd = [tool, "--version"] + + result = subprocess.run(cmd, capture_output=True, text=True, timeout=self.timeout) + + if result.returncode == 0: + return VersionParser.parse_version_output(tool, result.stdout) + else: + # fallback to alternative command if available + alt_cmd = [tool, "-v"] + result = subprocess.run(alt_cmd, capture_output=True, text=True, timeout=self.timeout) + if result.returncode == 0: + return VersionParser.parse_version_output(tool, result.stdout) + + except subprocess.TimeoutExpired: + self.logger.error(timeout_checking_tool.format(tool=tool)) + return None + except Exception as e: + self.logger.error(error_checking_tool_version.format(tool=tool, error=str(e))) + return None + + return None + + def check_tool_version(self, tool: str, expected_version: Optional[str]) -> ConflictCheckResult: + """Check a single tool's version against expected version.""" + command_name = self.TOOL_MAPPING.get(tool, tool) + current_version = self.get_tool_version(command_name) + + if current_version is None: + return ConflictCheckResult( + tool=tool, expected=expected_version, current=None, status=tool_not_found, conflict=True + ) + + if expected_version is None or expected_version == "": + # Just check existence + return ConflictCheckResult( + tool=tool, expected="present", current=current_version, status=tool_version_compatible, conflict=False + ) + + # Parse version requirement to handle ranges + normalized_expected = VersionParser.normalize_version_requirement(expected_version) + + # Check version compatibility + is_compatible = VersionParser.compare_versions(current_version, normalized_expected) + + return ConflictCheckResult( + tool=tool, + expected=normalized_expected, + current=current_version, + status=tool_version_compatible if is_compatible else tool_version_mismatch, + conflict=not is_compatible, + ) + + +class ConflictChecker: + """Main class for checking version conflicts.""" + + def __init__(self, config: ConflictConfig, logger: LoggerProtocol): + self.config = config + self.logger = logger + self.yaml_config = Config() + # Load deps config for version-command lookup + config_data = self._load_user_config(self.config.config_file) + deps_config = config_data.get("deps", {}) + self.version_checker = 
ToolVersionChecker(logger, deps_config) + + def check_conflicts(self) -> List[ConflictCheckResult]: + """Check for version conflicts.""" + results = [] + + try: + # Load configuration using standardized Config class + config_data = self._load_user_config(self.config.config_file) + + # Extract version requirements from deps section + deps = config_data.get("deps", {}) + + if not deps: + self.logger.warning(no_deps_found_warning) + return results + + # Check version conflicts + results.extend(self._check_version_conflicts(deps)) + + except Exception as e: + self.logger.error(f"Error loading configuration: {str(e)}") + results.append(ConflictCheckResult(tool="configuration", status="error", conflict=True, error=str(e))) + + return results + + def _load_user_config(self, config_path: str) -> Dict[str, Any]: + """Load user configuration file using standardized Config class.""" + self.logger.debug(conflict_loading_config.format(path=config_path)) + + try: + # Use standardized Config class for loading user config + flattened_config = self.yaml_config.load_user_config(config_path) + self.logger.debug(conflict_config_loaded) + + # Convert flattened config back to nested structure for backward compatibility + nested_config = self.yaml_config.unflatten_config(flattened_config) + return nested_config + + except FileNotFoundError: + raise FileNotFoundError(conflict_config_not_found.format(path=config_path)) + except Exception as e: + raise Exception(conflict_invalid_config.format(error=str(e))) + + def _check_version_conflicts(self, deps: Dict[str, Any]) -> List[ConflictCheckResult]: + """Check for tool version conflicts from deps configuration.""" + # Extract version requirements from deps + version_requirements = self._extract_version_requirements(deps) + + if not version_requirements: + return [] + + # Check versions in parallel + results = ParallelProcessor.process_items( + items=list(version_requirements.items()), + processor_func=self._check_tool_version, + max_workers=min(len(version_requirements), 10), + error_handler=self._handle_check_error, + ) + + return results + + def _extract_version_requirements(self, deps: Dict[str, Any]) -> Dict[str, Optional[str]]: + """Extract version requirements from deps configuration.""" + version_requirements = {} + + for tool, config in deps.items(): + if isinstance(config, dict): + # Only check tools that have a version key (even if empty) + if "version" in config: + version_req = config.get("version", "") + version_requirements[tool] = version_req if version_req else None + + return version_requirements + + def _check_tool_version(self, tool_requirement: Tuple[str, Optional[str]]) -> ConflictCheckResult: + """Check version for a single tool.""" + tool, expected_version = tool_requirement + return self.version_checker.check_tool_version(tool, expected_version) + + def _handle_check_error(self, tool_requirement: Tuple[str, Optional[str]], error: Exception) -> ConflictCheckResult: + """Handle errors during version checking.""" + tool, expected_version = tool_requirement + return ConflictCheckResult( + tool=tool, expected=expected_version, current=None, status="error", conflict=True, error=str(error) + ) + + +class ConflictFormatter: + """Handles formatting of conflict check results.""" + + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, data: List[ConflictCheckResult], output_type: str) -> str: + """Format conflict check results.""" + if not data: + message = 
self.output_formatter.create_success_message(no_version_conflicts_message) + return self.output_formatter.format_output(message, output_type) + + messages = [] + for result in data: + data_dict = result.model_dump() + message = self._format_single_result(result) + + if result.conflict: + messages.append(self.output_formatter.create_error_message(message, data_dict)) + else: + messages.append(self.output_formatter.create_success_message(message, data_dict)) + + return self.output_formatter.format_output(messages, output_type) + + def _format_single_result(self, result: ConflictCheckResult) -> str: + """Format a single conflict check result.""" + if result.conflict: + if result.current is None: + return f"{result.tool}: {result.status}" + else: + return f"{result.tool}: Expected {result.expected}, Found {result.current}" + else: + return f"{result.tool}: Version compatible ({result.current})" + + +class ConflictService: + """Main service class for conflict checking functionality.""" + + def __init__(self, config: ConflictConfig, logger: Optional[LoggerProtocol] = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.checker = ConflictChecker(config, self.logger) + self.formatter = ConflictFormatter() + + def check_conflicts(self) -> List[ConflictCheckResult]: + """Check for conflicts and return results.""" + self.logger.debug("Starting version conflict checks") + return self.checker.check_conflicts() + + def check_and_format(self, output_type: Optional[str] = None) -> str: + """Check conflicts and return formatted output.""" + results = self.check_conflicts() + output_format = output_type or self.config.output + return self.formatter.format_output(results, output_format) diff --git a/cli/app/commands/conflict/messages.py b/cli/app/commands/conflict/messages.py new file mode 100644 index 00000000..f9570b02 --- /dev/null +++ b/cli/app/commands/conflict/messages.py @@ -0,0 +1,43 @@ +no_version_conflicts_message = "No version conflicts to check"# Message constants for conflict command + +# General messages +conflict_check_help = "Check for tool version conflicts" +error_checking_conflicts = "Error checking conflicts: {error}" +no_conflicts_found = "No version conflicts found" +conflict_checking_tool = "Checking tool: {tool}" +conflict_loading_config = "Loading configuration from {path}" +conflict_config_loaded = "Configuration loaded successfully" +conflict_config_not_found = "Configuration file not found at {path}" +conflict_invalid_config = "Invalid configuration file: {error}" + +# Tool-specific messages +tool_not_found = "Tool not found" +tool_version_mismatch = "Version mismatch" +tool_version_compatible = "Version compatible" + +# Error messages +error_checking_tool_version = "Error checking version for {tool}: {error}" +error_parsing_version = "Error parsing version for {tool}: {error}" +timeout_checking_tool = "Timeout checking tool: {tool}" + +# Success/Info messages +conflicts_found_warning = "Found {count} version conflict(s)" +no_conflicts_info = "No version conflicts found" + +# Status messages +checking_conflicts_info = "Checking for tool version conflicts..." 
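+
+# For reference: VersionParser.normalize_version_requirement (conflict.py) turns
+# these formats into specifiers, e.g. "1.20" becomes ">=1.20.0, <1.21.0" and
+# "1.20.3" becomes "==1.20.3"; requirements that already carry an operator are
+# passed through unchanged.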
+ +# Version specification help +supported_version_formats_info = """ +Supported version formats in config files: + - Exact version: "1.20.3" + - Range operators: ">=1.20.0, <2.0.0" + - Greater/less than: ">=1.20.0", "<2.0.0" + - Compatible release: "~=1.20.0" + - Major.minor only: "1.20" (treated as >=1.20.0, <1.21.0) +""" + +unsupported_version_format_warning = "Unsupported version format '{format}' for {tool}. {help}" + +# warning messages +no_deps_found_warning = "No dependencies found in configuration" diff --git a/cli/app/commands/conflict/models.py b/cli/app/commands/conflict/models.py new file mode 100644 index 00000000..07c0e129 --- /dev/null +++ b/cli/app/commands/conflict/models.py @@ -0,0 +1,23 @@ +""" +Data models and configuration for the conflict command. +""" + +from typing import Optional +from pydantic import BaseModel, Field + + +class ConflictCheckResult(BaseModel): + """Result of a conflict check for a tool.""" + tool: str + expected: Optional[str] = None + current: Optional[str] = None + status: str + conflict: bool + error: Optional[str] = None + + +class ConflictConfig(BaseModel): + """Configuration for conflict checking.""" + config_file: str = Field("helpers/config.prod.yaml", description="Path to configuration file") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format (text/json)") diff --git a/cli/app/commands/install/__init__.py b/cli/app/commands/install/__init__.py new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/cli/app/commands/install/__init__.py @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/cli/app/commands/install/command.py b/cli/app/commands/install/command.py new file mode 100644 index 00000000..43c6bce9 --- /dev/null +++ b/cli/app/commands/install/command.py @@ -0,0 +1,118 @@ +import typer + +from app.utils.config import Config +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper +from .deps import install_all_deps + +from .run import Install +from .ssh import SSH, SSHConfig + +install_app = typer.Typer(help="Install Nixopus", invoke_without_command=True) + + +@install_app.callback() +def install_callback( + ctx: typer.Context, + verbose: bool = typer.Option(False, "--verbose", "-v", help="Show more details while installing"), + timeout: int = typer.Option(300, "--timeout", "-t", help="How long to wait for each step (in seconds)"), + force: bool = typer.Option(False, "--force", "-f", help="Replace files if they already exist"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="See what would happen, but don't make changes"), + config_file: str = typer.Option(None, "--config-file", "-c", help="Path to custom config file (defaults to built-in config)"), + api_domain: str = typer.Option(None, "--api-domain", "-ad", help="The domain where the nixopus api will be accessible (e.g. api.nixopus.com), if not provided you can use the ip address of the server and the port (e.g. 192.168.1.100:8443)"), + view_domain: str = typer.Option(None, "--view-domain", "-vd", help="The domain where the nixopus view will be accessible (e.g. nixopus.com), if not provided you can use the ip address of the server and the port (e.g. 
192.168.1.100:80)"), +): + """Install Nixopus""" + if ctx.invoked_subcommand is None: + logger = Logger(verbose=verbose) + install = Install( + logger=logger, + verbose=verbose, + timeout=timeout, + force=force, + dry_run=dry_run, + config_file=config_file, + api_domain=api_domain, + view_domain=view_domain + ) + install.run() + +def main_install_callback(value: bool): + if value: + logger = Logger(verbose=False) + install = Install(logger=logger, verbose=False, timeout=300, force=False, dry_run=False, config_file=None) + install.run() + raise typer.Exit() + +@install_app.command(name="ssh") +def ssh( + path: str = typer.Option("~/.ssh/nixopus_ed25519", "--path", "-p", help="The SSH key path to generate"), + key_type: str = typer.Option("ed25519", "--key-type", "-t", help="The SSH key type (rsa, ed25519, ecdsa)"), + key_size: int = typer.Option(4096, "--key-size", "-s", help="The SSH key size"), + passphrase: str = typer.Option(None, "--passphrase", "-P", help="The passphrase to use for the SSH key"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + force: bool = typer.Option(False, "--force", "-f", help="Force overwrite existing SSH key"), + set_permissions: bool = typer.Option(True, "--set-permissions", "-S", help="Set proper file permissions"), + add_to_authorized_keys: bool = typer.Option( + False, "--add-to-authorized-keys", "-a", help="Add public key to authorized_keys" + ), + create_ssh_directory: bool = typer.Option( + True, "--create-ssh-directory", "-c", help="Create .ssh directory if it doesn't exist" + ), + timeout: int = typer.Option(10, "--timeout", "-T", help="Timeout in seconds"), +): + """Generate an SSH key pair with proper permissions and optional authorized_keys integration""" + try: + logger = Logger(verbose=verbose) + config = SSHConfig( + path=path, + key_type=key_type, + key_size=key_size, + passphrase=passphrase, + verbose=verbose, + output=output, + dry_run=dry_run, + force=force, + set_permissions=set_permissions, + add_to_authorized_keys=add_to_authorized_keys, + create_ssh_directory=create_ssh_directory, + ) + ssh_operation = SSH(logger=logger) + + with TimeoutWrapper(timeout): + result = ssh_operation.generate(config) + + logger.success(result.output) + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(e) + raise typer.Exit(1) + +@install_app.command(name="deps") +def deps( + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Install dependencies""" + try: + logger = Logger(verbose=verbose) + + with TimeoutWrapper(timeout): + result = install_all_deps(verbose=verbose, output=output, dry_run=dry_run) + + if output == "json": + print(result) + else: + logger.success("All dependencies installed successfully.") + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(e) + raise typer.Exit(1) diff --git a/cli/app/commands/install/deps.py b/cli/app/commands/install/deps.py new file mode 100644 index 00000000..beddf060 --- /dev/null +++ b/cli/app/commands/install/deps.py @@ -0,0 +1,134 @@ 
+import subprocess +import shutil +import json +from app.utils.config import Config +from app.utils.lib import HostInformation +from app.utils.logger import Logger +from app.utils.config import DEPS +from .messages import ( + unsupported_package_manager, + no_supported_package_manager, + failed_to_install, + installing_dep, + dry_run_update_cmd, + dry_run_install_cmd, +) +from app.utils.lib import ParallelProcessor + +def get_deps_from_config(): + config = Config() + deps = config.get_yaml_value(DEPS) + return [ + { + "name": name, + "package": dep.get("package", name), + "command": dep.get("command", ""), + "install_command": dep.get("install_command", "") + } + for name, dep in deps.items() + ] + +def get_installed_deps(deps, os_name, package_manager, timeout=2, verbose=False): + checker = DependencyChecker(Logger(verbose=verbose)) + return {dep["name"]: checker.check_dependency(dep, package_manager) for dep in deps} + +def update_system_packages(package_manager, logger, dry_run=False): + if package_manager == "apt": + cmd = ["sudo", "apt-get", "update"] + elif package_manager == "brew": + cmd = ["brew", "update"] + elif package_manager == "apk": + cmd = ["sudo", "apk", "update"] + elif package_manager == "yum": + cmd = ["sudo", "yum", "update"] + elif package_manager == "dnf": + cmd = ["sudo", "dnf", "update"] + elif package_manager == "pacman": + cmd = ["sudo", "pacman", "-Sy"] + else: + raise Exception(unsupported_package_manager.format(package_manager=package_manager)) + if dry_run: + logger.info(dry_run_update_cmd.format(cmd=' '.join(cmd))) + else: + subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + +def install_dep(dep, package_manager, logger, dry_run=False): + package = dep["package"] + install_command = dep.get("install_command", "") + try: + if install_command: + if dry_run: + logger.info(f"[DRY RUN] Would run: {install_command}") + return True + subprocess.check_call(install_command, shell=True) + return True + if package_manager == "apt": + cmd = ["sudo", "apt-get", "install", "-y", package] + elif package_manager == "brew": + cmd = ["brew", "install", package] + elif package_manager == "apk": + cmd = ["sudo", "apk", "add", package] + elif package_manager == "yum": + cmd = ["sudo", "yum", "install", "-y", package] + elif package_manager == "dnf": + cmd = ["sudo", "dnf", "install", "-y", package] + elif package_manager == "pacman": + cmd = ["sudo", "pacman", "-S", "--noconfirm", package] + else: + raise Exception(unsupported_package_manager.format(package_manager=package_manager)) + logger.info(installing_dep.format(dep=package)) + if dry_run: + logger.info(dry_run_install_cmd.format(cmd=' '.join(cmd))) + return True + subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + return True + except Exception as e: + logger.error(failed_to_install.format(dep=package, error=e)) + return False + +class DependencyChecker: + def __init__(self, logger=None): + self.logger = logger + + def check_dependency(self, dep, package_manager): + try: + if dep["command"]: + is_available = shutil.which(dep["command"]) is not None + return is_available + return True + except Exception: + return False + +def install_all_deps(verbose=False, output="text", dry_run=False): + logger = Logger(verbose=verbose) + deps = get_deps_from_config() + os_name = HostInformation.get_os_name() + package_manager = HostInformation.get_package_manager() + if not package_manager: + raise Exception(no_supported_package_manager) + installed = 
get_installed_deps(deps, os_name, package_manager, verbose=verbose) + update_system_packages(package_manager, logger, dry_run=dry_run) + to_install = [dep for dep in deps if not installed.get(dep["name"])] + + def install_wrapper(dep): + ok = install_dep(dep, package_manager, logger, dry_run=dry_run) + return {"dependency": dep["name"], "installed": ok} + + def error_handler(dep, exc): + logger.error(f"Failed to install {dep['name']}: {exc}") + return {"dependency": dep["name"], "installed": False} + + results = ParallelProcessor.process_items( + to_install, + install_wrapper, + max_workers=min(len(to_install), 8), + error_handler=error_handler, + ) + + installed_after = get_installed_deps(deps, os_name, package_manager, verbose=verbose) + failed = [dep["name"] for dep in deps if not installed_after.get(dep["name"])] + if failed and not dry_run: + raise Exception(failed_to_install.format(dep=','.join(failed), error='')) + if output == "json": + return json.dumps({"installed": results, "failed": failed, "dry_run": dry_run}) + return True diff --git a/cli/app/commands/install/messages.py b/cli/app/commands/install/messages.py new file mode 100644 index 00000000..3f59fd5e --- /dev/null +++ b/cli/app/commands/install/messages.py @@ -0,0 +1,118 @@ +path_already_exists_use_force = "Path {path} already exists. Use --force to overwrite." +executing_command = "Executing: {command}" +installing_nixopus = "Installing nixopus" +invalid_output_format = "Invalid output format" +invalid_dry_run = "Invalid dry run format" +invalid_force = "Invalid force format" +invalid_verbose = "Invalid verbose format" +dry_run_mode = "=== DRY RUN MODE ===" +dry_run_command_would_be_executed = "The following command would be executed:" +dry_run_command = "Command: {command}" +dry_run_force_mode = "Force mode: {force}" +end_dry_run = "=== END DRY RUN ===" +prerequisites_validation_failed = "Prerequisites validation failed" +successfully_added_ssh_key = "Successfully generated SSH key: {key}" +ssh_key_add_failed = "SSH key generation failed: {error}" +unexpected_error_during_ssh_add = "Unexpected error during SSH key generation: {error}" +dry_run_ssh_key = "SSH key path: {key}" +dry_run_passphrase = "Passphrase: {passphrase}" +adding_ssh_key = "Generating SSH key: {key}" +invalid_ssh_key_path = "Invalid SSH key path format" +invalid_passphrase = "Invalid passphrase format" +ssh_key_not_found = "SSH key not found: {key}" +failed_to_add_ssh_key = "Failed to generate SSH key" +executing_ssh_keygen = "Executing ssh-keygen: {command}" +successfully_generated_ssh_key = "Successfully generated SSH key: {key}" +ssh_keygen_failed = "SSH key generation failed: {error}" +unexpected_error_during_ssh_keygen = "Unexpected error during SSH key generation: {error}" +generating_ssh_key = "Generating SSH key: {key}" +invalid_key_type = "Invalid key type format" +invalid_key_size = "Invalid key size format" +setting_permissions = "Setting proper file permissions" +adding_to_authorized_keys = "Adding public key to authorized_keys" +ssh_directory_created = "Created SSH directory: {dir}" +permissions_set_successfully = "File permissions set successfully" +authorized_keys_updated = "Public key added to authorized_keys" +ssh_key_already_exists = "SSH key already exists: {path}. Use --force to overwrite." 
+failed_to_add_ssh_key = "Failed to add SSH key to authorized_keys" +failed_to_read_public_key = "Failed to read public key" +failed_to_append_to_authorized_keys = "Failed to append to authorized_keys" +failed_to_add_to_authorized_keys = "Failed to add to authorized_keys: {error}" +unknown_error = "Unknown error" +unsupported_package_manager = "Unsupported package manager: {package_manager}" +no_supported_package_manager = "No supported package manager found" +failed_to_install = "Failed to install {dep}: {error}" +installing_dep = "Installing {dep}" +dry_run_update_cmd = "[DRY RUN] Would run: {cmd}" +dry_run_install_cmd = "[DRY RUN] Would run: {cmd}" +timeout_error = "Operation timed out after {timeout} seconds" +failed_to_run_ssh = "Failed to run SSH setup" +failed_to_run_up = "Failed to start services" +installation_completed = "Nixopus installation completed successfully!" +installation_failed = "Installation failed" +ports_unavailable = "Required ports are not available" +missing_dependencies_warning = "Missing dependencies will be installed" +dependencies_installed = "Dependencies installed successfully" +dependency_installation_timeout = "Dependency installation timed out" +clone_failed = "Repository clone failed" +env_file_creation_failed = "Failed to create environment file" +env_file_permissions_failed = "Failed to set environment file permissions" +proxy_config_created = "Created Caddyfile" +ssh_setup_failed = "SSH setup failed" +services_start_failed = "Services failed to start" +proxy_load_failed = "Failed to load Caddy proxy configuration" +debug_ssh_config_validation = "DEBUG: Validating SSH configuration: path={path}, type={key_type}, size={key_size}" +debug_ssh_path_expansion = "DEBUG: Expanding SSH path from '{original}' to '{expanded}'" +debug_ssh_directory_check = "DEBUG: Checking SSH directory existence: {directory}" +debug_ssh_directory_creation = "DEBUG: Creating SSH directory: {directory} with permissions {permissions}" +debug_ssh_keygen_availability = "DEBUG: Checking ssh-keygen availability" +debug_ssh_keygen_command_build = "DEBUG: Building ssh-keygen command: {command}" +debug_ssh_key_generation_start = "DEBUG: Starting SSH key generation for: {path}" +debug_ssh_key_generation_success = "DEBUG: SSH key generation completed successfully: {path}" +debug_ssh_permission_setting = "DEBUG: Setting permissions for private key: {private_key} and public key: {public_key}" +debug_ssh_authorized_keys_path = "DEBUG: Authorized keys path: {path}" +debug_ssh_authorized_keys_read = "DEBUG: Reading public key content from: {path}" +debug_ssh_authorized_keys_append = "DEBUG: Appending public key to authorized_keys: {path}" +debug_ssh_keygen_availability_result = "ssh-keygen availability check result: {availability}" +debug_ssh_keygen_availability_failed = "ssh-keygen availability check failed: {error}" +debug_ssh_keygen_version_info = "SSH keygen version: {version}" +debug_ssh_process_stdout = "Process stdout: {stdout}" +debug_ssh_process_stderr = "Process stderr: {stderr}" +debug_ssh_private_key_permissions = "Setting private key permissions to 0600: {path}" +debug_ssh_private_key_permissions_failed = "Failed to set private key permissions: {error}" +debug_ssh_public_key_permissions = "Setting public key permissions to 0644: {path}" +debug_ssh_public_key_permissions_failed = "Failed to set public key permissions: {error}" +debug_ssh_permissions_success = "SSH key permissions set successfully" +debug_ssh_permissions_exception = "Exception while setting permissions: {error}" 
+debug_ssh_directory_created = "SSH directory created successfully: {directory}" +debug_ssh_directory_creation_failed = "Failed to create SSH directory: {error}" +debug_ssh_directory_exception = "Exception while creating SSH directory: {error}" +debug_ssh_public_key_read_failed = "Failed to read public key content: {error}" +debug_ssh_directory_missing = "SSH directory does not exist, creating: {directory}" +debug_ssh_authorized_keys_missing = "authorized_keys file does not exist, creating: {path}" +debug_ssh_authorized_keys_created = "Created authorized_keys file with 0600 permissions: {path}" +debug_ssh_authorized_keys_creation_failed = "Failed to create authorized_keys file: {error}" +debug_ssh_authorized_keys_append_failed = "Failed to append to authorized_keys: {error}" +debug_ssh_authorized_keys_exception = "Exception in add_to_authorized_keys: {error}" +debug_ssh_key_exists = "SSH key path already exists: {path}" +debug_ssh_force_disabled = "Force mode is disabled, failing validation" +debug_ssh_force_enabled = "Force mode is enabled, will overwrite existing key" +debug_ssh_key_not_exists = "SSH key path does not exist: {path}" +debug_ssh_prerequisites_completed = "Prerequisites validation completed successfully" +debug_ssh_prerequisites_failed_abort = "Prerequisites validation failed, aborting SSH key generation" +debug_ssh_dry_run_enabled = "Dry run mode enabled, skipping actual key generation" +debug_ssh_key_directory_info = "SSH key directory: {directory}" +debug_ssh_directory_creation_enabled = "SSH directory creation enabled, ensuring directory exists: {directory}" +debug_ssh_directory_creation_failed_abort = "SSH directory creation failed: {error}" +debug_ssh_generation_process_start = "Starting SSH key generation process" +debug_ssh_generation_failed_abort = "SSH key generation failed: {error}" +debug_ssh_permissions_enabled = "Setting permissions enabled, configuring SSH key permissions" +debug_ssh_public_key_path_info = "Public key path: {path}" +debug_ssh_permissions_failed_abort = "Permission setting failed: {error}" +debug_ssh_authorized_keys_enabled = "Adding to authorized_keys enabled, updating authorized_keys file" +debug_ssh_authorized_keys_failed_abort = "Adding to authorized_keys failed: {error}" +debug_ssh_process_completed = "SSH key generation process completed successfully" +operation_timed_out = "Operation timed out" +created_env_file = "Created {service_name} environment file: {env_file}" +config_file_not_found = "Config file not found: {config_file}" +configuration_key_has_no_default_value = "Configuration key '{key}' has no default value and was not provided" \ No newline at end of file diff --git a/cli/app/commands/install/run.py b/cli/app/commands/install/run.py new file mode 100644 index 00000000..f5d16d2e --- /dev/null +++ b/cli/app/commands/install/run.py @@ -0,0 +1,347 @@ +import typer +import os +import yaml +import json +import shutil +from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn +from app.utils.protocols import LoggerProtocol +from app.utils.config import Config, VIEW_ENV_FILE, API_ENV_FILE, DEFAULT_REPO, DEFAULT_BRANCH, DEFAULT_PATH, NIXOPUS_CONFIG_DIR, PORTS, DEFAULT_COMPOSE_FILE, PROXY_PORT, SSH_KEY_TYPE, SSH_KEY_SIZE, SSH_FILE_PATH, VIEW_PORT, API_PORT, DOCKER_PORT, CADDY_CONFIG_VOLUME +from app.utils.timeout import TimeoutWrapper +from app.commands.preflight.run import PreflightRunner +from app.commands.clone.clone import Clone, CloneConfig +from app.utils.lib import HostInformation, FileManager 
+from app.commands.conf.base import BaseEnvironmentManager +import re +from app.commands.service.up import Up, UpConfig +from app.commands.proxy.load import Load, LoadConfig +from .ssh import SSH, SSHConfig +from .messages import ( + installation_failed, installing_nixopus, + dependency_installation_timeout, + clone_failed, env_file_creation_failed, env_file_permissions_failed, + proxy_config_created, ssh_setup_failed, services_start_failed, proxy_load_failed, + operation_timed_out, created_env_file, configuration_key_has_no_default_value +) +from .deps import install_all_deps + +_config = Config() +_config_dir = _config.get_yaml_value(NIXOPUS_CONFIG_DIR) +_source_path = _config.get_yaml_value(DEFAULT_PATH) + +DEFAULTS = { + 'proxy_port': _config.get_yaml_value(PROXY_PORT), + 'ssh_key_type': _config.get_yaml_value(SSH_KEY_TYPE), + 'ssh_key_size': _config.get_yaml_value(SSH_KEY_SIZE), + 'ssh_passphrase': None, + 'service_name': 'all', + 'service_detach': True, + 'required_ports': [int(port) for port in _config.get_yaml_value(PORTS)], + 'repo_url': _config.get_yaml_value(DEFAULT_REPO), + 'branch_name': _config.get_yaml_value(DEFAULT_BRANCH), + 'source_path': _source_path, + 'config_dir': _config_dir, + 'api_env_file_path': _config.get_yaml_value(API_ENV_FILE), + 'view_env_file_path': _config.get_yaml_value(VIEW_ENV_FILE), + 'compose_file': _config.get_yaml_value(DEFAULT_COMPOSE_FILE), + 'full_source_path': os.path.join(_config_dir, _source_path), + 'ssh_key_path': _config_dir + "/" + _config.get_yaml_value(SSH_FILE_PATH), + 'compose_file_path': _config_dir + "/" + _config.get_yaml_value(DEFAULT_COMPOSE_FILE), + 'host_os': HostInformation.get_os_name(), + 'package_manager': HostInformation.get_package_manager(), + 'view_port': _config.get_yaml_value(VIEW_PORT), + 'api_port': _config.get_yaml_value(API_PORT), + 'docker_port': _config.get_yaml_value(DOCKER_PORT), +} + + +class Install: + def __init__(self, logger: LoggerProtocol = None, verbose: bool = False, timeout: int = 300, force: bool = False, dry_run: bool = False, config_file: str = None, api_domain: str = None, view_domain: str = None): + self.logger = logger + self.verbose = verbose + self.timeout = timeout + self.force = force + self.dry_run = dry_run + self.config_file = config_file + self.api_domain = api_domain + self.view_domain = view_domain + self._user_config = _config.load_user_config(self.config_file) + self.progress = None + self.main_task = None + self._validate_domains() + + def _get_config(self, key: str): + try: + return _config.get_config_value(key, self._user_config, DEFAULTS) + except ValueError: + raise ValueError(configuration_key_has_no_default_value.format(key=key)) + + def _validate_domains(self): + if (self.api_domain is None) != (self.view_domain is None): + raise ValueError("Both api_domain and view_domain must be provided together, or neither should be provided") + + if self.api_domain and self.view_domain: + domain_pattern = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\.([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?))*$') + if not domain_pattern.match(self.api_domain) or not domain_pattern.match(self.view_domain): + raise ValueError("Invalid domain format. 
Domains must be valid hostnames") + + + def run(self): + steps = [ + ("Preflight checks", self._run_preflight_checks), + ("Installing dependencies", self._install_dependencies), + ("Cloning repository", self._setup_clone_and_config), + ("Setting up proxy config", self._setup_proxy_config), + ("Creating environment files", self._create_env_files), + ("Generating SSH keys", self._setup_ssh), + ("Starting services", self._start_services), + ] + + # Only add proxy steps if both api_domain and view_domain are provided + if self.api_domain and self.view_domain: + steps.append(("Loading proxy configuration", self._load_proxy)) + + try: + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TaskProgressColumn(), + transient=True, + refresh_per_second=2, + ) as progress: + self.progress = progress + self.main_task = progress.add_task(installing_nixopus, total=len(steps)) + + for i, (step_name, step_func) in enumerate(steps): + progress.update(self.main_task, description=f"{installing_nixopus} - {step_name} ({i+1}/{len(steps)})") + try: + step_func() + progress.advance(self.main_task, 1) + except Exception as e: + progress.update(self.main_task, description=f"Failed at {step_name}") + raise + + progress.update(self.main_task, completed=True, description="Installation completed") + + self._show_success_message() + + except Exception as e: + self._handle_installation_error(e) + self.logger.error(f"{installation_failed}: {str(e)}") + raise typer.Exit(1) + + def _handle_installation_error(self, error, context=""): + context_msg = f" during {context}" if context else "" + if self.verbose: + self.logger.error(f"{installation_failed}{context_msg}: {str(error)}") + else: + self.logger.error(f"{installation_failed}{context_msg}") + + def _run_preflight_checks(self): + preflight_runner = PreflightRunner(logger=self.logger, verbose=self.verbose) + preflight_runner.check_ports_from_config( + config_key='required_ports', + user_config=self._user_config, + defaults=DEFAULTS + ) + + def _install_dependencies(self): + try: + with TimeoutWrapper(self.timeout): + result = install_all_deps(verbose=self.verbose, output="json", dry_run=self.dry_run) + except TimeoutError: + raise Exception(dependency_installation_timeout) + + def _setup_clone_and_config(self): + clone_config = CloneConfig( + repo=self._get_config('repo_url'), + branch=self._get_config('branch_name'), + path=self._get_config('full_source_path'), + force=self.force, + verbose=self.verbose, + output="text", + dry_run=self.dry_run + ) + clone_service = Clone(logger=self.logger) + try: + with TimeoutWrapper(self.timeout): + result = clone_service.clone(clone_config) + except TimeoutError: + raise Exception(f"{clone_failed}: {operation_timed_out}") + if not result.success: + raise Exception(f"{clone_failed}: {result.error}") + + def _create_env_files(self): + api_env_file = self._get_config('api_env_file_path') + view_env_file = self._get_config('view_env_file_path') + FileManager.create_directory(FileManager.get_directory_path(api_env_file), logger=self.logger) + FileManager.create_directory(FileManager.get_directory_path(view_env_file), logger=self.logger) + services = [ + ("api", "services.api.env", api_env_file), + ("view", "services.view.env", view_env_file), + ] + env_manager = BaseEnvironmentManager(self.logger) + + for i, (service_name, service_key, env_file) in enumerate(services): + env_values = _config.get_service_env_values(service_key) + updated_env_values = 
self._update_environment_variables(env_values) + success, error = env_manager.write_env_file(env_file, updated_env_values) + if not success: + raise Exception(f"{env_file_creation_failed} {service_name}: {error}") + file_perm_success, file_perm_error = FileManager.set_permissions(env_file, 0o644) + if not file_perm_success: + raise Exception(f"{env_file_permissions_failed} {service_name}: {file_perm_error}") + self.logger.debug(created_env_file.format(service_name=service_name, env_file=env_file)) + + def _setup_proxy_config(self): + full_source_path = self._get_config('full_source_path') + caddy_json_template = os.path.join(full_source_path, 'helpers', 'caddy.json') + + if not self.dry_run: + with open(caddy_json_template, 'r') as f: + config_str = f.read() + + host_ip = HostInformation.get_public_ip() + view_port = self._get_config('view_port') + api_port = self._get_config('api_port') + + view_domain = self.view_domain if self.view_domain is not None else host_ip + api_domain = self.api_domain if self.api_domain is not None else host_ip + + config_str = config_str.replace('{env.APP_DOMAIN}', view_domain) + config_str = config_str.replace('{env.API_DOMAIN}', api_domain) + + app_reverse_proxy_url = f"{host_ip}:{view_port}" + api_reverse_proxy_url = f"{host_ip}:{api_port}" + config_str = config_str.replace('{env.APP_REVERSE_PROXY_URL}', app_reverse_proxy_url) + config_str = config_str.replace('{env.API_REVERSE_PROXY_URL}', api_reverse_proxy_url) + + caddy_config = json.loads(config_str) + with open(caddy_json_template, 'w') as f: + json.dump(caddy_config, f, indent=2) + self._copy_caddyfile_to_target(full_source_path) + + self.logger.debug(f"{proxy_config_created}: {caddy_json_template}") + + def _setup_ssh(self): + config = SSHConfig( + path=self._get_config('ssh_key_path'), + key_type=self._get_config('ssh_key_type'), + key_size=self._get_config('ssh_key_size'), + passphrase=self._get_config('ssh_passphrase'), + verbose=self.verbose, + output="text", + dry_run=self.dry_run, + force=self.force, + set_permissions=True, + add_to_authorized_keys=True, + create_ssh_directory=True, + ) + ssh_operation = SSH(logger=self.logger) + try: + with TimeoutWrapper(self.timeout): + result = ssh_operation.generate(config) + except TimeoutError: + raise Exception(f"{ssh_setup_failed}: {operation_timed_out}") + if not result.success: + raise Exception(ssh_setup_failed) + + def _start_services(self): + config = UpConfig( + name=self._get_config('service_name'), + detach=self._get_config('service_detach'), + env_file=None, + verbose=self.verbose, + output="text", + dry_run=self.dry_run, + compose_file=self._get_config('compose_file_path') + ) + + up_service = Up(logger=self.logger) + try: + with TimeoutWrapper(self.timeout): + result = up_service.up(config) + except TimeoutError: + raise Exception(f"{services_start_failed}: {operation_timed_out}") + if not result.success: + raise Exception(services_start_failed) + + def _load_proxy(self): + proxy_port = self._get_config('proxy_port') + full_source_path = self._get_config('full_source_path') + caddy_json_config = os.path.join(full_source_path, 'helpers', 'caddy.json') + config = LoadConfig(proxy_port=proxy_port, verbose=self.verbose, output="text", dry_run=self.dry_run, config_file=caddy_json_config) + + load_service = Load(logger=self.logger) + try: + with TimeoutWrapper(self.timeout): + result = load_service.load(config) + except TimeoutError: + raise Exception(f"{proxy_load_failed}: {operation_timed_out}") + + if result.success: + if not self.dry_run: + 
self.logger.success(load_service.format_output(result, "text")) + else: + self.logger.error(result.error) + raise Exception(proxy_load_failed) + + def _show_success_message(self): + nixopus_accessible_at = self._get_access_url() + + self.logger.success("Installation Complete!") + self.logger.info(f"Nixopus is accessible at: {nixopus_accessible_at}") + self.logger.highlight("Thank you for installing Nixopus!") + self.logger.info("Please visit the documentation at https://docs.nixopus.com for more information.") + self.logger.info("If you have any questions, please visit the community forum at https://discord.gg/skdcq39Wpv") + self.logger.highlight("See you in the community!") + + def _update_environment_variables(self, env_values: dict) -> dict: + updated_env = env_values.copy() + host_ip = HostInformation.get_public_ip() + secure = self.api_domain is not None and self.view_domain is not None + + api_host = self.api_domain if secure else f"{host_ip}:{self._get_config('api_port')}" + view_host = self.view_domain if secure else f"{host_ip}:{self._get_config('view_port')}" + protocol = "https" if secure else "http" + ws_protocol = "wss" if secure else "ws" + key_map = { + 'ALLOWED_ORIGIN': f"{protocol}://{view_host}", + 'SSH_HOST': host_ip, + 'SSH_PRIVATE_KEY': self._get_config('ssh_key_path'), + 'WEBSOCKET_URL': f"{ws_protocol}://{view_host}/ws", + 'API_URL': f"{protocol}://{api_host}/api", + 'WEBHOOK_URL': f"{protocol}://{api_host}/api/v1/webhook", + } + + for key, value in key_map.items(): + if key in updated_env: + updated_env[key] = value + + return updated_env + + def _copy_caddyfile_to_target(self, full_source_path: str): + try: + source_caddyfile = os.path.join(full_source_path, 'helpers', 'Caddyfile') + target_dir = _config.get_yaml_value(CADDY_CONFIG_VOLUME) + target_caddyfile = os.path.join(target_dir, 'Caddyfile') + FileManager.create_directory(target_dir, logger=self.logger) + if os.path.exists(source_caddyfile): + shutil.copy2(source_caddyfile, target_caddyfile) + FileManager.set_permissions(target_caddyfile, 0o644, logger=self.logger) + self.logger.debug(f"Copied Caddyfile from {source_caddyfile} to {target_caddyfile}") + else: + self.logger.warning(f"Source Caddyfile not found at {source_caddyfile}") + + except Exception as e: + self.logger.error(f"Failed to copy Caddyfile: {str(e)}") + + def _get_access_url(self): + if self.view_domain: + return f"https://{self.view_domain}" + elif self.api_domain: + return f"https://{self.api_domain}" + else: + view_port = self._get_config('view_port') + host_ip = HostInformation.get_public_ip() + return f"http://{host_ip}:{view_port}" diff --git a/cli/app/commands/install/ssh.py b/cli/app/commands/install/ssh.py new file mode 100644 index 00000000..327335a3 --- /dev/null +++ b/cli/app/commands/install/ssh.py @@ -0,0 +1,485 @@ +import os +import stat +import subprocess +from typing import Optional, Protocol + +from pydantic import BaseModel, Field, field_validator + +from app.utils.lib import FileManager +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .messages import ( + adding_to_authorized_keys, + authorized_keys_updated, + debug_ssh_authorized_keys_append, + debug_ssh_authorized_keys_append_failed, + debug_ssh_authorized_keys_created, + debug_ssh_authorized_keys_creation_failed, + debug_ssh_authorized_keys_enabled, + debug_ssh_authorized_keys_exception, + debug_ssh_authorized_keys_failed_abort, + debug_ssh_authorized_keys_missing, + 
debug_ssh_authorized_keys_path, + debug_ssh_authorized_keys_read, + debug_ssh_config_validation, + debug_ssh_directory_check, + debug_ssh_directory_created, + debug_ssh_directory_creation, + debug_ssh_directory_creation_enabled, + debug_ssh_directory_creation_failed, + debug_ssh_directory_creation_failed_abort, + debug_ssh_directory_exception, + debug_ssh_directory_missing, + debug_ssh_dry_run_enabled, + debug_ssh_force_disabled, + debug_ssh_force_enabled, + debug_ssh_generation_failed_abort, + debug_ssh_generation_process_start, + debug_ssh_key_directory_info, + debug_ssh_key_exists, + debug_ssh_key_generation_start, + debug_ssh_key_generation_success, + debug_ssh_key_not_exists, + debug_ssh_keygen_availability, + debug_ssh_keygen_availability_failed, + debug_ssh_keygen_availability_result, + debug_ssh_keygen_command_build, + debug_ssh_keygen_version_info, + debug_ssh_path_expansion, + debug_ssh_permission_setting, + debug_ssh_permissions_enabled, + debug_ssh_permissions_exception, + debug_ssh_permissions_failed_abort, + debug_ssh_permissions_success, + debug_ssh_prerequisites_completed, + debug_ssh_prerequisites_failed_abort, + debug_ssh_private_key_permissions, + debug_ssh_private_key_permissions_failed, + debug_ssh_process_completed, + debug_ssh_process_stderr, + debug_ssh_process_stdout, + debug_ssh_public_key_path_info, + debug_ssh_public_key_permissions, + debug_ssh_public_key_permissions_failed, + debug_ssh_public_key_read_failed, + dry_run_command, + dry_run_command_would_be_executed, + dry_run_force_mode, + dry_run_mode, + dry_run_passphrase, + dry_run_ssh_key, + end_dry_run, + executing_ssh_keygen, + failed_to_add_to_authorized_keys, + failed_to_append_to_authorized_keys, + failed_to_read_public_key, + generating_ssh_key, + invalid_key_size, + invalid_key_type, + invalid_ssh_key_path, + prerequisites_validation_failed, + ssh_key_already_exists, + ssh_keygen_failed, + successfully_generated_ssh_key, + unexpected_error_during_ssh_keygen, + unknown_error, +) + + +class SSHKeyProtocol(Protocol): + def generate_ssh_key( + self, path: str, key_type: str = "rsa", key_size: int = 4096, passphrase: str = None + ) -> tuple[bool, str]: ... 
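+# Illustrative example (sample values, not taken from the project config): with the defaults,
+# SSHCommandBuilder.build_ssh_keygen_command("~/.ssh/nixopus_rsa", "rsa", 4096, None)
+# should return ["ssh-keygen", "-t", "rsa", "-f", "~/.ssh/nixopus_rsa", "-N", "", "-b", "4096"],
+# i.e. a 4096-bit RSA key generated without a passphrase ("~/.ssh/nixopus_rsa" is a placeholder path).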
+ + +class SSHCommandBuilder: + @staticmethod + def build_ssh_keygen_command(path: str, key_type: str = "rsa", key_size: int = 4096, passphrase: str = None) -> list[str]: + cmd = ["ssh-keygen", "-t", key_type, "-f", path, "-N"] + if passphrase is not None: + cmd.append(passphrase) + else: + cmd.append("") + if key_type in ["rsa", "dsa", "ecdsa"]: + cmd.extend(["-b", str(key_size)]) + return cmd + + +class SSHFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, result: "SSHResult", output: str) -> str: + if result.success: + message = successfully_generated_ssh_key.format(key=result.path) + output_message = self.output_formatter.create_success_message(message, result.model_dump()) + else: + error = result.error or unknown_error + output_message = self.output_formatter.create_error_message(error, result.model_dump()) + + return self.output_formatter.format_output(output_message, output) + + def format_dry_run(self, config: "SSHConfig") -> str: + cmd = SSHCommandBuilder.build_ssh_keygen_command(config.path, config.key_type, config.key_size, config.passphrase) + + output = [] + output.append(dry_run_mode) + output.append(dry_run_command_would_be_executed) + output.append(dry_run_command.format(command=" ".join(cmd))) + output.append(dry_run_ssh_key.format(key=config.path)) + output.append(f"Key type: {config.key_type}") + output.append(f"Key size: {config.key_size}") + if config.passphrase: + output.append(dry_run_passphrase.format(passphrase="***")) + output.append(dry_run_force_mode.format(force=config.force)) + output.append(end_dry_run) + return "\n".join(output) + + +class SSHKeyManager: + def __init__(self, logger: LoggerProtocol): + self.file_manager = FileManager() + self.logger = logger + + def _check_ssh_keygen_availability(self) -> tuple[bool, str]: + self.logger.debug(debug_ssh_keygen_availability) + try: + result = subprocess.run(["ssh-keygen", "-h"], capture_output=True, text=True, check=False) + availability = result.returncode == 0 + self.logger.debug(debug_ssh_keygen_availability_result.format(availability=availability)) + return availability, None + except Exception as e: + self.logger.debug(debug_ssh_keygen_availability_failed.format(error=e)) + return False, f"ssh-keygen not found: {e}" + + def _check_ssh_keygen_version(self) -> tuple[bool, str]: + try: + result = subprocess.run(["ssh-keygen", "-V"], capture_output=True, text=True, check=False) + if result.returncode == 0: + self.logger.debug(debug_ssh_keygen_version_info.format(version=result.stdout.strip())) + return True, None + except Exception: + return True, None + + def generate_ssh_key( + self, path: str, key_type: str = "rsa", key_size: int = 4096, passphrase: str = None, force: bool = False + ) -> tuple[bool, str]: + self.logger.debug(debug_ssh_key_generation_start.format(path=path)) + + if force: + if os.path.exists(path): + os.remove(path) + pub_path = path + ".pub" + if os.path.exists(pub_path): + os.remove(pub_path) + + cmd = SSHCommandBuilder.build_ssh_keygen_command(path, key_type, key_size, passphrase) + self.logger.debug(debug_ssh_keygen_command_build.format(command=" ".join(cmd))) + + try: + self.logger.debug(executing_ssh_keygen.format(command=" ".join(cmd))) + result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=30) + self.logger.debug(debug_ssh_key_generation_success.format(path=path)) + return True, None + except subprocess.TimeoutExpired: + self.logger.error("ssh-keygen timed out") + return False, "ssh-keygen timed out" + 
except subprocess.CalledProcessError as e: + self.logger.error(f"ssh-keygen failed. Command: {' '.join(cmd)}") + self.logger.debug(debug_ssh_process_stdout.format(stdout=e.stdout)) + self.logger.debug(debug_ssh_process_stderr.format(stderr=e.stderr)) + self.logger.error(ssh_keygen_failed.format(error=e.stderr.strip() if e.stderr else str(e))) + return False, e.stderr.strip() if e.stderr else str(e) + except Exception as e: + self.logger.error(f"Unexpected error running ssh-keygen. Command: {' '.join(cmd)}") + self.logger.error(unexpected_error_during_ssh_keygen.format(error=e)) + return False, str(e) + + def set_key_permissions(self, private_key_path: str, public_key_path: str) -> tuple[bool, str]: + self.logger.debug(debug_ssh_permission_setting.format(private_key=private_key_path, public_key=public_key_path)) + try: + self.logger.debug(debug_ssh_private_key_permissions.format(path=private_key_path)) + private_success, private_error = self.file_manager.set_permissions( + private_key_path, stat.S_IRUSR | stat.S_IWUSR, self.logger + ) + if not private_success: + self.logger.debug(debug_ssh_private_key_permissions_failed.format(error=private_error)) + return False, private_error + + self.logger.debug(debug_ssh_public_key_permissions.format(path=public_key_path)) + public_success, public_error = self.file_manager.set_permissions( + public_key_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, self.logger + ) + if not public_success: + self.logger.debug(debug_ssh_public_key_permissions_failed.format(error=public_error)) + return False, public_error + + self.logger.debug(debug_ssh_permissions_success) + return True, None + except Exception as e: + self.logger.debug(debug_ssh_permissions_exception.format(error=e)) + return False, f"Failed to set permissions: {e}" + + def create_ssh_directory(self, ssh_dir: str) -> tuple[bool, str]: + permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR + self.logger.debug(debug_ssh_directory_creation.format(directory=ssh_dir, permissions=oct(permissions))) + try: + self.logger.debug(debug_ssh_directory_check.format(directory=ssh_dir)) + success, error = self.file_manager.create_directory(ssh_dir, permissions, self.logger) + if success: + self.logger.debug(debug_ssh_directory_created.format(directory=ssh_dir)) + else: + self.logger.debug(debug_ssh_directory_creation_failed.format(error=error)) + return success, error + except Exception as e: + self.logger.debug(debug_ssh_directory_exception.format(error=e)) + return False, f"Failed to create SSH directory: {e}" + + def add_to_authorized_keys(self, public_key_path: str) -> tuple[bool, str]: + try: + self.logger.debug(adding_to_authorized_keys) + self.logger.debug(debug_ssh_authorized_keys_read.format(path=public_key_path)) + + success, content, error = self.file_manager.read_file_content(public_key_path, self.logger) + if not success: + self.logger.debug(debug_ssh_public_key_read_failed.format(error=error)) + return False, error or failed_to_read_public_key + + ssh_dir = self.file_manager.expand_user_path("~/.ssh") + authorized_keys_path = os.path.join(ssh_dir, "authorized_keys") + self.logger.debug(debug_ssh_authorized_keys_path.format(path=authorized_keys_path)) + + if not os.path.exists(ssh_dir): + self.logger.debug(debug_ssh_directory_missing.format(directory=ssh_dir)) + success, error = self.create_ssh_directory(ssh_dir) + if not success: + return False, error + + if not os.path.exists(authorized_keys_path): + 
self.logger.debug(debug_ssh_authorized_keys_missing.format(path=authorized_keys_path)) + try: + with open(authorized_keys_path, "w") as f: + pass + os.chmod(authorized_keys_path, stat.S_IRUSR | stat.S_IWUSR) + self.logger.debug(debug_ssh_authorized_keys_created.format(path=authorized_keys_path)) + except Exception as e: + self.logger.debug(debug_ssh_authorized_keys_creation_failed.format(error=e)) + return False, f"Failed to create authorized_keys file: {e}" + + self.logger.debug(debug_ssh_authorized_keys_append.format(path=authorized_keys_path)) + success, error = self.file_manager.append_to_file(authorized_keys_path, content, self.logger) + if not success: + self.logger.debug(debug_ssh_authorized_keys_append_failed.format(error=error)) + return False, error or failed_to_append_to_authorized_keys + + self.logger.debug(authorized_keys_updated) + return True, None + except Exception as e: + error_msg = failed_to_add_to_authorized_keys.format(error=e) + self.logger.debug(debug_ssh_authorized_keys_exception.format(error=e)) + self.logger.error(error_msg) + return False, error_msg + + +class SSHResult(BaseModel): + path: str + key_type: str + key_size: int + passphrase: Optional[str] + force: bool + verbose: bool + output: str + success: bool = False + error: Optional[str] = None + set_permissions: bool = True + add_to_authorized_keys: bool = False + create_ssh_directory: bool = True + + +class SSHConfig(BaseModel): + path: str = Field(..., min_length=1, description="SSH key path to generate") + key_type: str = Field("rsa", description="SSH key type (rsa, ed25519, ecdsa)") + key_size: int = Field(4096, description="SSH key size") + passphrase: Optional[str] = Field(None, description="Passphrase for the SSH key") + force: bool = Field(False, description="Force overwrite existing SSH key") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format: text, json") + dry_run: bool = Field(False, description="Dry run mode") + set_permissions: bool = Field(True, description="Set proper file permissions") + add_to_authorized_keys: bool = Field(False, description="Add public key to authorized_keys") + create_ssh_directory: bool = Field(True, description="Create .ssh directory if it doesn't exist") + + @field_validator("path") + @classmethod + def validate_path(cls, path: str) -> str: + stripped_path = path.strip() + if not stripped_path: + raise ValueError(invalid_ssh_key_path) + + if not cls._is_valid_key_path(stripped_path): + raise ValueError(invalid_ssh_key_path) + return stripped_path + + @staticmethod + def _is_valid_key_path(key_path: str) -> bool: + return ( + key_path.startswith(("~", "/", "./")) + or os.path.isabs(key_path) + or key_path.endswith((".pem", ".key", "_rsa", "_ed25519")) + ) + + @field_validator("key_type") + @classmethod + def validate_key_type(cls, key_type: str) -> str: + valid_types = ["rsa", "ed25519", "ecdsa", "dsa"] + if key_type.lower() not in valid_types: + raise ValueError(invalid_key_type) + return key_type.lower() + + @field_validator("key_size") + @classmethod + def validate_key_size(cls, key_size: int, info) -> int: + key_type = info.data.get("key_type", "rsa") + + if key_type == "ed25519": + return 256 + elif key_type == "ecdsa": + if key_size not in [256, 384, 521]: + raise ValueError(invalid_key_size) + elif key_type == "dsa": + if key_size != 1024: + raise ValueError(invalid_key_size) + else: + if key_size < 1024 or key_size > 16384: + raise ValueError(invalid_key_size) + + return key_size + + 
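+    # Key-size rules enforced above: ed25519 always uses a fixed 256-bit size, ecdsa accepts
+    # 256/384/521, dsa only 1024, and any other type (e.g. rsa) must fall between 1024 and 16384 bits.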
@field_validator("passphrase") + @classmethod + def validate_passphrase(cls, passphrase: str) -> Optional[str]: + if not passphrase: + return None + stripped_passphrase = passphrase.strip() + if not stripped_passphrase: + return None + return stripped_passphrase + + +class SSHService: + def __init__(self, config: SSHConfig, logger: LoggerProtocol = None, ssh_manager: SSHKeyProtocol = None): + self.logger = logger or Logger(verbose=config.verbose) + self.config = config + self.ssh_manager = ssh_manager or SSHKeyManager(self.logger) + self.formatter = SSHFormatter() + self.file_manager = FileManager() + + def _validate_prerequisites(self) -> bool: + self.logger.debug(debug_ssh_config_validation.format( + path=self.config.path, + key_type=self.config.key_type, + key_size=self.config.key_size + )) + + expanded_key_path = self.file_manager.expand_user_path(self.config.path) + self.logger.debug(debug_ssh_path_expansion.format(original=self.config.path, expanded=expanded_key_path)) + + if os.path.exists(expanded_key_path): + self.logger.debug(debug_ssh_key_exists.format(path=expanded_key_path)) + if not self.config.force: + self.logger.debug(debug_ssh_force_disabled) + self.logger.error(ssh_key_already_exists.format(path=self.config.path)) + return False + else: + self.logger.debug(debug_ssh_force_enabled) + else: + self.logger.debug(debug_ssh_key_not_exists.format(path=expanded_key_path)) + + self.logger.debug(debug_ssh_prerequisites_completed) + return True + + def _create_result(self, success: bool, error: str = None) -> SSHResult: + return SSHResult( + path=self.config.path, + key_type=self.config.key_type, + key_size=self.config.key_size, + passphrase=self.config.passphrase, + force=self.config.force, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + set_permissions=self.config.set_permissions, + add_to_authorized_keys=self.config.add_to_authorized_keys, + create_ssh_directory=self.config.create_ssh_directory, + ) + + def generate_ssh_key(self) -> SSHResult: + self.logger.debug(generating_ssh_key.format(key=self.config.path)) + + if not self._validate_prerequisites(): + self.logger.debug(debug_ssh_prerequisites_failed_abort) + return self._create_result(False, prerequisites_validation_failed) + + if self.config.dry_run: + self.logger.debug(debug_ssh_dry_run_enabled) + dry_run_output = self.formatter.format_dry_run(self.config) + return self._create_result(True, dry_run_output) + + expanded_path = self.file_manager.expand_user_path(self.config.path) + ssh_dir = self.file_manager.get_directory_path(expanded_path) + self.logger.debug(debug_ssh_key_directory_info.format(directory=ssh_dir)) + + if self.config.create_ssh_directory: + self.logger.debug(debug_ssh_directory_creation_enabled.format(directory=ssh_dir)) + success, error = self.ssh_manager.create_ssh_directory(ssh_dir) + if not success: + self.logger.debug(debug_ssh_directory_creation_failed_abort.format(error=error)) + return self._create_result(False, error) + + self.logger.debug(debug_ssh_generation_process_start) + success, error = self.ssh_manager.generate_ssh_key( + self.config.path, self.config.key_type, self.config.key_size, self.config.passphrase, self.config.force + ) + + if not success: + return self._create_result(False, error) + + if self.config.set_permissions: + self.logger.debug(debug_ssh_permissions_enabled) + public_key_path = self.file_manager.get_public_key_path(expanded_path) + self.logger.debug(debug_ssh_public_key_path_info.format(path=public_key_path)) + success, error 
= self.ssh_manager.set_key_permissions(expanded_path, public_key_path) + if not success: + self.logger.debug(debug_ssh_permissions_failed_abort.format(error=error)) + return self._create_result(False, error) + + if self.config.add_to_authorized_keys: + self.logger.debug(debug_ssh_authorized_keys_enabled) + public_key_path = self.file_manager.get_public_key_path(expanded_path) + success, error = self.ssh_manager.add_to_authorized_keys(public_key_path) + if not success: + self.logger.debug(debug_ssh_authorized_keys_failed_abort.format(error=error)) + return self._create_result(False, error) + + self.logger.debug(debug_ssh_process_completed) + return self._create_result(True) + + def generate_and_format(self) -> str: + result = self.generate_ssh_key() + return self.formatter.format_output(result, self.config.output) + + +class SSH: + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger or Logger() + + def generate(self, config: SSHConfig) -> SSHResult: + service = SSHService(config, self.logger) + return service.generate_ssh_key() + + def format_output(self, result: SSHResult, output: str) -> str: + formatter = SSHFormatter() + return formatter.format_output(result, output) diff --git a/cli/app/commands/preflight/__init__.py b/cli/app/commands/preflight/__init__.py new file mode 100644 index 00000000..0209b0a0 --- /dev/null +++ b/cli/app/commands/preflight/__init__.py @@ -0,0 +1 @@ +# cli commands preflight module diff --git a/cli/app/commands/preflight/command.py b/cli/app/commands/preflight/command.py new file mode 100644 index 00000000..29420a63 --- /dev/null +++ b/cli/app/commands/preflight/command.py @@ -0,0 +1,155 @@ +import typer + +from app.utils.lib import HostInformation +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper + +from .deps import Deps, DepsConfig +from .run import PreflightRunner +from .messages import ( + debug_starting_preflight_check, + debug_preflight_check_completed, + debug_starting_ports_check, + debug_ports_check_completed, + debug_starting_deps_check, + debug_deps_check_completed, + debug_creating_port_config, + debug_creating_deps_config, + debug_initializing_port_service, + debug_initializing_deps_service, + debug_timeout_wrapper_start, + debug_timeout_wrapper_end, + debug_formatting_output, + error_checking_deps, + error_checking_ports, + error_timeout_occurred, + error_validation_failed, + running_preflight_checks, +) +from .port import PortConfig, PortService + +preflight_app = typer.Typer(no_args_is_help=False) + + +@preflight_app.callback(invoke_without_command=True) +def preflight_callback(ctx: typer.Context): + """Preflight checks for system compatibility""" + if ctx.invoked_subcommand is None: + ctx.invoke(check) + + +@preflight_app.command() +def check( + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text,json"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Run all preflight checks""" + try: + logger = Logger(verbose=verbose) + logger.debug(debug_starting_preflight_check) + logger.info(running_preflight_checks) + + logger.debug(debug_timeout_wrapper_start.format(timeout=timeout)) + with TimeoutWrapper(timeout): + preflight_runner = PreflightRunner(logger=logger, verbose=verbose) + preflight_runner.check_ports_from_config() + logger.debug(debug_timeout_wrapper_end) + logger.debug(debug_preflight_check_completed) + + logger.success("All preflight 
checks completed successfully") + except TimeoutError as e: + logger.error(error_timeout_occurred.format(timeout=timeout)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(f"Unexpected error during preflight check: {e}") + raise typer.Exit(1) + +@preflight_app.command() +def ports( + ports: list[int] = typer.Argument(..., help="The list of ports to check"), + host: str = typer.Option("localhost", "--host", "-h", help="The host to check"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +) -> None: + """Check if list of ports are available on a host""" + try: + logger = Logger(verbose=verbose) + logger.debug(debug_starting_ports_check) + + logger.debug(debug_creating_port_config) + config = PortConfig(ports=ports, host=host, verbose=verbose) + + logger.debug(debug_initializing_port_service) + port_service = PortService(config, logger=logger) + + logger.debug(debug_timeout_wrapper_start.format(timeout=timeout)) + with TimeoutWrapper(timeout): + results = port_service.check_ports() + logger.debug(debug_timeout_wrapper_end) + + logger.debug(debug_formatting_output.format(format=output)) + formatted_output = port_service.formatter.format_output(results, output) + + logger.success(formatted_output) + logger.debug(debug_ports_check_completed) + + except ValueError as e: + logger.error(error_validation_failed.format(error=e)) + raise typer.Exit(1) + except TimeoutError as e: + logger.error(error_timeout_occurred.format(timeout=timeout)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(error_checking_ports.format(error=e)) + raise typer.Exit(1) + + +@preflight_app.command() +def deps( + deps: list[str] = typer.Argument(..., help="The list of dependencies to check"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +) -> None: + """Check if list of dependencies are available on the system""" + try: + logger = Logger(verbose=verbose) + logger.debug(debug_starting_deps_check) + + logger.debug(debug_creating_deps_config) + config = DepsConfig( + deps=deps, + verbose=verbose, + output=output, + os=HostInformation.get_os_name(), + package_manager=HostInformation.get_package_manager(), + ) + + logger.debug(debug_initializing_deps_service) + deps_checker = Deps(logger=logger) + + logger.debug(debug_timeout_wrapper_start.format(timeout=timeout)) + with TimeoutWrapper(timeout): + results = deps_checker.check(config) + logger.debug(debug_timeout_wrapper_end) + + logger.debug(debug_formatting_output.format(format=output)) + formatted_output = deps_checker.format_output(results, output) + + logger.success(formatted_output) + logger.debug(debug_deps_check_completed) + + except ValueError as e: + logger.error(error_validation_failed.format(error=e)) + raise typer.Exit(1) + except TimeoutError as e: + logger.error(error_timeout_occurred.format(timeout=timeout)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(error_checking_deps.format(error=e)) + raise typer.Exit(1) diff --git a/cli/app/commands/preflight/deps.py b/cli/app/commands/preflight/deps.py new file mode 100644 
index 00000000..f23edf19 --- /dev/null +++ b/cli/app/commands/preflight/deps.py @@ -0,0 +1,205 @@ +import shutil +import subprocess +from typing import Optional, Protocol + +from pydantic import BaseModel, Field, field_validator + +from app.utils.lib import ParallelProcessor, Supported +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .messages import ( + error_checking_dependency, + invalid_os, + invalid_package_manager, + timeout_checking_dependency, + debug_processing_deps, + debug_dep_check_result, + error_subprocess_execution_failed, +) + + +class DependencyCheckerProtocol(Protocol): + def check_dependency(self, dep: str) -> bool: ... + + +class DependencyChecker: + def __init__(self, logger: LoggerProtocol): + self.logger = logger + + def check_dependency(self, dep: str) -> bool: + try: + is_available = shutil.which(dep) is not None + self.logger.debug(debug_dep_check_result.format(dep=dep, status="available" if is_available else "not available")) + return is_available + + except subprocess.TimeoutExpired: + if self.logger.verbose: + self.logger.error(timeout_checking_dependency.format(dep=dep)) + return False + except Exception as e: + if self.logger.verbose: + self.logger.error(error_subprocess_execution_failed.format(dep=dep, error=e)) + return False + + +class DependencyValidator: + def validate_os(self, os: str) -> str: + if not Supported.os(os): + raise ValueError(invalid_os.format(os=os)) + return os + + def validate_package_manager(self, package_manager: str) -> str: + if not Supported.package_manager(package_manager): + raise ValueError(invalid_package_manager.format(package_manager=package_manager)) + return package_manager + + +class DependencyFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, results: list["DepsCheckResult"], output: str) -> str: + if not results: + return self.output_formatter.format_output( + self.output_formatter.create_success_message("No dependencies to check"), output + ) + + if len(results) == 1 and output == "text": + messages = [] + result = results[0] + message = f"{result.dependency} is {'available' if result.is_available else 'not available'}" + if result.is_available: + message = f"{result.dependency} is available" + data = {"dependency": result.dependency, "is_available": result.is_available} + messages.append(self.output_formatter.create_success_message(message, data)) + else: + error = f"{result.dependency} is not available" + data = {"dependency": result.dependency, "is_available": result.is_available, "error": result.error} + messages.append(self.output_formatter.create_error_message(error, data)) + + if output == "text": + table_data = [] + for result in results: + row = { + "Dependency": result.dependency, + "Status": "available" if result.is_available else "not available" + } + if result.error and not result.is_available: + row["Error"] = result.error + table_data.append(row) + + return self.output_formatter.create_table( + table_data, + title="Dependency Check Results", + show_header=True, + show_lines=True + ) + else: + json_data = [] + for result in results: + item = { + "dependency": result.dependency, + "is_available": result.is_available, + "status": "available" if result.is_available else "not available" + } + if result.error and not result.is_available: + item["error"] = result.error + json_data.append(item) + + return self.output_formatter.format_json(json_data) + + +class 
DepsCheckResult(BaseModel): + dependency: str + verbose: bool + output: str + os: str + package_manager: str + is_available: bool = False + error: Optional[str] = None + + +class DepsConfig(BaseModel): + deps: list[str] = Field(..., min_length=1, description="The list of dependencies to check") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format, text, json") + os: str = Field(..., description=f"The operating system to check, available: {Supported.get_os()}") + package_manager: str = Field(..., description="The package manager to use") + + @field_validator("os") + @classmethod + def validate_os(cls, os: str) -> str: + validator = DependencyValidator() + return validator.validate_os(os) + + @field_validator("package_manager") + @classmethod + def validate_package_manager(cls, package_manager: str) -> str: + validator = DependencyValidator() + return validator.validate_package_manager(package_manager) + + +class DepsService: + def __init__(self, config: DepsConfig, logger: LoggerProtocol = None, checker: DependencyCheckerProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.checker = checker or DependencyChecker(self.logger) + self.formatter = DependencyFormatter() + + def _create_result(self, dep: str, is_available: bool, error: str = None) -> DepsCheckResult: + return DepsCheckResult( + dependency=dep, + verbose=self.config.verbose, + output=self.config.output, + os=self.config.os, + package_manager=self.config.package_manager, + is_available=is_available, + error=error, + ) + + def _check_dependency(self, dep: str) -> DepsCheckResult: + try: + is_available = self.checker.check_dependency(dep) + return self._create_result(dep, is_available) + except Exception as e: + return self._create_result(dep, False, str(e)) + + def check_dependencies(self) -> list[DepsCheckResult]: + self.logger.debug(debug_processing_deps.format(count=len(self.config.deps))) + + def process_dep(dep: str) -> DepsCheckResult: + return self._check_dependency(dep) + + def error_handler(dep: str, error: Exception) -> DepsCheckResult: + if self.logger.verbose: + self.logger.error(error_checking_dependency.format(dep=dep, error=error)) + return self._create_result(dep, False, str(error)) + + results = ParallelProcessor.process_items( + items=self.config.deps, + processor_func=process_dep, + max_workers=min(len(self.config.deps), 50), + error_handler=error_handler, + ) + + return results + + def check_and_format(self) -> str: + results = self.check_dependencies() + return self.formatter.format_output(results, self.config.output) + + +class Deps: + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger + self.validator = DependencyValidator() + self.formatter = DependencyFormatter() + + def check(self, config: DepsConfig) -> list[DepsCheckResult]: + service = DepsService(config, logger=self.logger) + return service.check_dependencies() + + def format_output(self, results: list[DepsCheckResult], output: str) -> str: + return self.formatter.format_output(results, output) diff --git a/cli/app/commands/preflight/messages.py b/cli/app/commands/preflight/messages.py new file mode 100644 index 00000000..3da34af8 --- /dev/null +++ b/cli/app/commands/preflight/messages.py @@ -0,0 +1,45 @@ +preflight_check_app_help = "Checks to ensure the system is ready for nixopus self-hosting" +running_preflight_checks = "Running preflight checks..." +check_ports = "Checking if ports are available..." 
+ports_list_contains_less_than_1_port = "Ports list contains less than 1 port" +ports_list_contains_values_outside_range_1_65535 = "Ports list contains values outside the range 1-65535" +ports_list_contains_non_integer_values = "Ports list contains non-integer values" +available = "available" +not_available = "not available" +error_checking_port = "Error checking port {port}: {error}" +host_must_be_localhost_or_valid_ip_or_domain = "Host must be 'localhost', a valid IP address, or a valid domain name" +invalid_distribution = "Invalid distribution: {distribution}" +invalid_os = "Invalid OS: {os}" +invalid_package_manager = "Invalid package manager: {package_manager}" +error_checking_deps = "Error checking dependencies: {error}" +error_checking_ports = "Error checking ports: {error}" +invalid_output_format = "Invalid output format: {output}" +error_checking_dependency = "Error checking dependency {dep}: {error}" +timeout_checking_dependency = "Timeout checking dependency: {dep}" +debug_starting_preflight_check = "Starting preflight check command" +debug_preflight_check_completed = "Preflight check completed" +debug_starting_ports_check = "Starting ports check command" +debug_ports_check_completed = "Ports check completed" +debug_starting_deps_check = "Starting dependencies check command" +debug_deps_check_completed = "Dependencies check completed" +debug_creating_port_config = "Creating port configuration" +debug_creating_deps_config = "Creating dependencies configuration" +debug_initializing_port_service = "Initializing port service" +debug_initializing_deps_service = "Initializing dependencies service" +debug_processing_ports = "Processing {count} ports" +debug_processing_deps = "Processing {count} dependencies" +debug_port_check_result = "Port {port} check result: {status}" +debug_dep_check_result = "Dependency {dep} check result: {status}" +debug_formatting_output = "Formatting output as {format}" +debug_timeout_wrapper_start = "Starting timeout wrapper with {timeout}s timeout" +debug_timeout_wrapper_end = "Timeout wrapper completed" +error_invalid_port_range = "Port {port} is outside valid range (1-65535)" +error_invalid_host_format = "Invalid host format: {host}" +error_timeout_occurred = "Operation timed out after {timeout} seconds" +error_validation_failed = "Validation failed: {error}" +error_service_initialization_failed = "Failed to initialize service: {error}" +error_output_formatting_failed = "Failed to format output: {error}" +error_parallel_processing_failed = "Parallel processing failed: {error}" +error_socket_connection_failed = "Socket connection failed for port {port}: {error}" +error_subprocess_execution_failed = "Subprocess execution failed for dependency {dep}: {error}" +ports_unavailable = "Ports unavailable" diff --git a/cli/app/commands/preflight/port.py b/cli/app/commands/preflight/port.py new file mode 100644 index 00000000..d956256c --- /dev/null +++ b/cli/app/commands/preflight/port.py @@ -0,0 +1,168 @@ +import re +import socket +from typing import Any, List, Optional, Protocol, TypedDict, Union + +from pydantic import BaseModel, Field, field_validator + +from app.utils.lib import ParallelProcessor +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .messages import ( + available, + error_checking_port, + host_must_be_localhost_or_valid_ip_or_domain, + not_available, + debug_processing_ports, + debug_port_check_result, + error_socket_connection_failed, +) + + +class 
PortCheckerProtocol(Protocol): + def check_port(self, port: int, config: "PortConfig") -> "PortCheckResult": ... + + +class PortCheckResult(TypedDict): + port: int + status: str + host: Optional[str] + error: Optional[str] + is_available: bool + + +class PortConfig(BaseModel): + ports: List[int] = Field(..., min_length=1, max_length=65535, description="List of ports to check") + host: str = Field("localhost", min_length=1, description="Host to check") + verbose: bool = Field(False, description="Verbose output") + + @field_validator("host") + @classmethod + def validate_host(cls, v: str) -> str: + if v.lower() == "localhost": + return v + ip_pattern = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" + if re.match(ip_pattern, v): + return v + domain_pattern = r"^[a-zA-Z]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)*$" + if re.match(domain_pattern, v): + return v + raise ValueError(host_must_be_localhost_or_valid_ip_or_domain) + + +class PortFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, data: Union[str, List[PortCheckResult], Any], output_type: str) -> str: + if isinstance(data, list): + if len(data) == 1 and output_type == "text": + item = data[0] + message = f"Port {item['port']}: {item['status']}" + if item.get("is_available", False): + return self.output_formatter.create_success_message(message).message + else: + return f"Error: {message}" + + if output_type == "text": + table_data = [] + for item in data: + row = { + "Port": str(item['port']), + "Status": item['status'] + } + if item.get('host') and item['host'] != "localhost": + row["Host"] = item['host'] + if item.get('error'): + row["Error"] = item['error'] + table_data.append(row) + + return self.output_formatter.create_table( + table_data, + title="Port Check Results", + show_header=True, + show_lines=True + ) + else: + json_data = [] + for item in data: + port_data = { + "port": item['port'], + "status": item['status'], + "is_available": item.get('is_available', False) + } + if item.get('host'): + port_data["host"] = item['host'] + if item.get('error'): + port_data["error"] = item['error'] + json_data.append(port_data) + return self.output_formatter.format_json(json_data) + else: + return str(data) + + +class PortChecker: + def __init__(self, logger: LoggerProtocol): + self.logger = logger + + def is_port_available(self, host: str, port: int) -> bool: + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.settimeout(1) + result = sock.connect_ex((host, port)) + return result != 0 + except Exception as e: + if self.logger.verbose: + self.logger.error(error_socket_connection_failed.format(port=port, error=e)) + return False + + def check_port(self, port: int, config: PortConfig) -> PortCheckResult: + try: + status = available if self.is_port_available(config.host, port) else not_available + self.logger.debug(debug_port_check_result.format(port=port, status=status)) + return self._create_result(port, config, status) + except Exception as e: + if self.logger.verbose: + self.logger.error(error_checking_port.format(port=port, error=str(e))) + return self._create_result(port, config, not_available, str(e)) + + def _create_result(self, port: int, config: PortConfig, status: str, error: Optional[str] = None) -> PortCheckResult: + return { + "port": port, + "status": status, + "host": config.host if config.host != "localhost" else None, + "error": error, + "is_available": status == 
available, + } + + +class PortService: + def __init__(self, config: PortConfig, logger: LoggerProtocol = None, checker: PortCheckerProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.checker = checker or PortChecker(self.logger) + self.formatter = PortFormatter() + + def check_ports(self) -> List[PortCheckResult]: + self.logger.debug(debug_processing_ports.format(count=len(self.config.ports))) + + def process_port(port: int) -> PortCheckResult: + return self.checker.check_port(port, self.config) + + def error_handler(port: int, error: Exception) -> PortCheckResult: + if self.logger.verbose: + self.logger.error(error_checking_port.format(port=port, error=str(error))) + return self.checker._create_result(port, self.config, not_available, str(error)) + + results = ParallelProcessor.process_items( + items=self.config.ports, + processor_func=process_port, + max_workers=min(len(self.config.ports), 50), + error_handler=error_handler, + ) + return sorted(results, key=lambda x: x["port"]) + + def check_and_format(self, output_type: str) -> str: + results = self.check_ports() + return self.formatter.format_output(results, output_type) diff --git a/cli/app/commands/preflight/run.py b/cli/app/commands/preflight/run.py new file mode 100644 index 00000000..2135ce0a --- /dev/null +++ b/cli/app/commands/preflight/run.py @@ -0,0 +1,38 @@ +from typing import List, Dict, Any +from app.utils.protocols import LoggerProtocol +from app.utils.config import Config +from .port import PortConfig, PortService +from .messages import ports_unavailable + + +class PreflightRunner: + """Centralized preflight check runner for port availability""" + + def __init__(self, logger: LoggerProtocol = None, verbose: bool = False): + self.logger = logger + self.verbose = verbose + self.config = Config() + + def run_port_checks(self, ports: List[int], host: str = "localhost") -> List[Dict[str, Any]]: + """Run port availability checks and return results""" + port_config = PortConfig(ports=ports, host=host, verbose=self.verbose) + port_service = PortService(port_config, logger=self.logger) + return port_service.check_ports() + + def check_required_ports(self, ports: List[int], host: str = "localhost") -> None: + """Check required ports and raise exception if any are unavailable""" + port_results = self.run_port_checks(ports, host) + unavailable_ports = [result for result in port_results if not result.get('is_available', True)] + + if unavailable_ports: + error_msg = f"{ports_unavailable}: {[p['port'] for p in unavailable_ports]}" + raise Exception(error_msg) + + def check_ports_from_config(self, config_key: str = 'required_ports', user_config: dict = None, defaults: dict = None) -> None: + """Check ports using configuration values""" + if user_config is not None and defaults is not None: + ports = self.config.get_config_value(config_key, user_config, defaults) + else: + ports = self.config.get_yaml_value('ports') + + self.check_required_ports(ports) diff --git a/cli/app/commands/proxy/__init__.py b/cli/app/commands/proxy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/app/commands/proxy/base.py b/cli/app/commands/proxy/base.py new file mode 100644 index 00000000..9d9fc455 --- /dev/null +++ b/cli/app/commands/proxy/base.py @@ -0,0 +1,269 @@ +import json +from typing import Generic, Optional, Protocol, TypeVar + +import requests +from pydantic import BaseModel, Field, field_validator + +from app.utils.config import Config, PROXY_PORT, CONFIG_ENDPOINT, 
LOAD_ENDPOINT, STOP_ENDPOINT, CADDY_BASE_URL +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .messages import ( + caddy_connection_failed, + config_file_not_found, + port_must_be_between_1_and_65535, + debug_checking_caddy_status, + debug_caddy_response, + debug_caddy_config_accessible, + debug_caddy_non_200, + debug_connection_refused, + debug_request_failed, + debug_unexpected_error, + debug_loading_config_file, + debug_config_parsed, + debug_posting_config, + debug_caddy_load_response, + debug_config_loaded_success, + debug_caddy_load_failed, + debug_stopping_caddy, + debug_caddy_stop_response, + debug_caddy_stopped_success, + debug_caddy_stop_failed, + caddy_is_running, + caddy_not_running, + invalid_json_error, + cannot_connect_to_caddy, + request_failed_error, + http_error, + unexpected_error, +) + +TConfig = TypeVar("TConfig", bound=BaseModel) +TResult = TypeVar("TResult", bound=BaseModel) + +config = Config() +proxy_port = config.get_yaml_value(PROXY_PORT) +caddy_config_endpoint = config.get_yaml_value(CONFIG_ENDPOINT) +caddy_load_endpoint = config.get_yaml_value(LOAD_ENDPOINT) +caddy_stop_endpoint = config.get_yaml_value(STOP_ENDPOINT) +caddy_base_url = config.get_yaml_value(CADDY_BASE_URL) + +class CaddyServiceProtocol(Protocol): + def check_status(self, port: int = proxy_port) -> tuple[bool, str]: ... + + def load_config(self, config_file: str, port: int = proxy_port) -> tuple[bool, str]: ... + + def stop_proxy(self, port: int = proxy_port) -> tuple[bool, str]: ... + + +class BaseCaddyCommandBuilder: + @staticmethod + def build_status_command(port: int = proxy_port) -> list[str]: + return ["curl", "-X", "GET", f"{caddy_base_url.format(port=port)}{caddy_config_endpoint}"] + + @staticmethod + def build_load_command(config_file: str, port: int = proxy_port) -> list[str]: + return [ + "curl", + "-X", + "POST", + f"{caddy_base_url.format(port=port)}{caddy_load_endpoint}", + "-H", + "Content-Type: application/json", + "-d", + f"@{config_file}", + ] + + @staticmethod + def build_stop_command(port: int = proxy_port) -> list[str]: + return ["curl", "-X", "POST", f"{caddy_base_url.format(port=port)}{caddy_stop_endpoint}"] + + +class BaseFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, result: TResult, output: str, success_message: str, error_message: str) -> str: + if result.success: + message = success_message.format(port=result.proxy_port) + output_message = self.output_formatter.create_success_message(message, result.model_dump()) + else: + error = result.error or "Unknown error occurred" + output_message = self.output_formatter.create_error_message(error, result.model_dump()) + + return self.output_formatter.format_output(output_message, output) + + def format_dry_run(self, config: TConfig, command_builder, dry_run_messages: dict) -> str: + if hasattr(command_builder, "build_status_command"): + cmd = command_builder.build_status_command(getattr(config, "proxy_port", proxy_port)) + elif hasattr(command_builder, "build_load_command"): + cmd = command_builder.build_load_command(getattr(config, "config_file", ""), getattr(config, "proxy_port", proxy_port)) + elif hasattr(command_builder, "build_stop_command"): + cmd = command_builder.build_stop_command(getattr(config, "proxy_port", proxy_port)) + else: + cmd = command_builder.build_command(config) + + output = [] + output.append(dry_run_messages["mode"]) + 
output.append(dry_run_messages["command_would_be_executed"]) + output.append(f"{dry_run_messages['command']} {' '.join(cmd)}") + output.append(f"{dry_run_messages['port']} {getattr(config, 'proxy_port', proxy_port)}") + + if hasattr(config, "config_file") and getattr(config, "config_file", None): + output.append(f"{dry_run_messages['config_file']} {getattr(config, 'config_file')}") + + output.append(dry_run_messages["end"]) + return "\n".join(output) + + +class BaseCaddyService: + def __init__(self, logger: LoggerProtocol): + self.logger = logger + + def _get_caddy_url(self, port: int, endpoint: str) -> str: + return f"{caddy_base_url.format(port=port)}{endpoint}" + + def check_status(self, port: int = proxy_port) -> tuple[bool, str]: + try: + url = self._get_caddy_url(port, caddy_config_endpoint) + self.logger.debug(debug_checking_caddy_status.format(url=url)) + + response = requests.get(url, timeout=5) + self.logger.debug(debug_caddy_response.format(code=response.status_code)) + + if response.status_code == 200: + self.logger.debug(debug_caddy_config_accessible) + return True, caddy_is_running + else: + self.logger.debug(debug_caddy_non_200.format(code=response.status_code)) + return False, http_error.format(code=response.status_code) + except requests.exceptions.ConnectionError: + self.logger.debug(debug_connection_refused.format(port=port)) + return False, caddy_not_running + except requests.exceptions.RequestException as e: + self.logger.debug(debug_request_failed.format(error=str(e))) + return False, request_failed_error.format(error=str(e)) + except Exception as e: + self.logger.debug(debug_unexpected_error.format(error=str(e))) + return False, unexpected_error.format(error=str(e)) + + def load_config(self, config_file: str, port: int = proxy_port) -> tuple[bool, str]: + try: + self.logger.debug(debug_loading_config_file.format(file=config_file)) + with open(config_file, "r") as f: + config_data = json.load(f) + self.logger.debug(debug_config_parsed) + + url = self._get_caddy_url(port, caddy_load_endpoint) + self.logger.debug(debug_posting_config.format(url=url)) + + response = requests.post(url, json=config_data, headers={"Content-Type": "application/json"}, timeout=10) + self.logger.debug(debug_caddy_load_response.format(code=response.status_code)) + + if response.status_code == 200: + self.logger.debug(debug_config_loaded_success) + return True, "Configuration loaded" + else: + error_msg = response.text.strip() if response.text else http_error.format(code=response.status_code) + self.logger.debug(debug_caddy_load_failed.format(error=error_msg)) + return False, error_msg + except FileNotFoundError: + error_msg = config_file_not_found.format(file=config_file) + self.logger.debug(error_msg) + return False, error_msg + except json.JSONDecodeError as e: + error_msg = invalid_json_error.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + except requests.exceptions.ConnectionError: + error_msg = caddy_connection_failed.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + except requests.exceptions.RequestException as e: + error_msg = request_failed_error.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + except Exception as e: + error_msg = unexpected_error.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + + def stop_proxy(self, port: int = proxy_port) -> tuple[bool, str]: + try: + url = self._get_caddy_url(port, caddy_stop_endpoint) + 
self.logger.debug(debug_stopping_caddy.format(url=url)) + + response = requests.post(url, timeout=5) + self.logger.debug(debug_caddy_stop_response.format(code=response.status_code)) + + if response.status_code == 200: + self.logger.debug(debug_caddy_stopped_success) + return True, "Caddy stopped" + else: + error_msg = http_error.format(code=response.status_code) + self.logger.debug(debug_caddy_stop_failed.format(error=error_msg)) + return False, error_msg + except requests.exceptions.ConnectionError: + error_msg = cannot_connect_to_caddy.format(port=port) + self.logger.debug(error_msg) + return False, error_msg + except requests.exceptions.RequestException as e: + error_msg = request_failed_error.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + except Exception as e: + error_msg = unexpected_error.format(error=str(e)) + self.logger.debug(error_msg) + return False, error_msg + + +class BaseConfig(BaseModel): + proxy_port: int = Field(proxy_port, description="Caddy admin port") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format: text, json") + dry_run: bool = Field(False, description="Dry run mode") + + @field_validator("proxy_port") + @classmethod + def validate_proxy_port(cls, port: int) -> int: + if port < 1 or port > 65535: + raise ValueError(port_must_be_between_1_and_65535) + return port + + +class BaseResult(BaseModel): + proxy_port: int + verbose: bool + output: str + success: bool = False + error: Optional[str] = None + + +class BaseService(Generic[TConfig, TResult]): + def __init__(self, config: TConfig, logger: LoggerProtocol = None, caddy_service: CaddyServiceProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.caddy_service = caddy_service + self.formatter = None + + def _create_result(self, success: bool, error: str = None) -> TResult: + raise NotImplementedError + + def execute(self) -> TResult: + raise NotImplementedError + + def execute_and_format(self) -> str: + raise NotImplementedError + + +class BaseAction(Generic[TConfig, TResult]): + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger + self.formatter = None + + def execute(self, config: TConfig) -> TResult: + raise NotImplementedError + + def format_output(self, result: TResult, output: str) -> str: + raise NotImplementedError diff --git a/cli/app/commands/proxy/command.py b/cli/app/commands/proxy/command.py new file mode 100644 index 00000000..6a0dfeb7 --- /dev/null +++ b/cli/app/commands/proxy/command.py @@ -0,0 +1,129 @@ +import typer + +from app.utils.config import Config, PROXY_PORT +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper + +from .load import Load, LoadConfig +from .status import Status, StatusConfig +from .stop import Stop, StopConfig +from .messages import operation_timed_out, unexpected_error + +proxy_app = typer.Typer( + name="proxy", + help="Manage Nixopus proxy (Caddy) configuration", +) + +config = Config() +proxy_port = config.get_yaml_value(PROXY_PORT) + +@proxy_app.command() +def load( + proxy_port: int = typer.Option(proxy_port, "--proxy-port", "-p", help="Caddy admin port"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format: text, json"), + dry_run: bool = typer.Option(False, "--dry-run", help="Dry run"), + config_file: str = typer.Option(None, "--config-file", "-c", help="Path to Caddy 
config file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Load Caddy proxy configuration""" + logger = Logger(verbose=verbose) + + try: + config = LoadConfig(proxy_port=proxy_port, verbose=verbose, output=output, dry_run=dry_run, config_file=config_file) + load_service = Load(logger=logger) + + with TimeoutWrapper(timeout): + result = load_service.load(config) + + output_text = load_service.format_output(result, output) + if result.success: + logger.success(output_text) + else: + logger.error(output_text) + raise typer.Exit(1) + + except TimeoutError: + logger.error(operation_timed_out.format(timeout=timeout)) + raise typer.Exit(1) + except ValueError as e: + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(unexpected_error.format(error=str(e))) + raise typer.Exit(1) + + +@proxy_app.command() +def status( + proxy_port: int = typer.Option(proxy_port, "--proxy-port", "-p", help="Caddy admin port"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format: text, json"), + dry_run: bool = typer.Option(False, "--dry-run", help="Dry run"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Check Caddy proxy status""" + logger = Logger(verbose=verbose) + + try: + config = StatusConfig(proxy_port=proxy_port, verbose=verbose, output=output, dry_run=dry_run) + status_service = Status(logger=logger) + + with TimeoutWrapper(timeout): + result = status_service.status(config) + + output_text = status_service.format_output(result, output) + if result.success: + logger.success(output_text) + else: + logger.error(output_text) + raise typer.Exit(1) + + except TimeoutError: + logger.error(operation_timed_out.format(timeout=timeout)) + raise typer.Exit(1) + except ValueError as e: + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(unexpected_error.format(error=str(e))) + raise typer.Exit(1) + + +@proxy_app.command() +def stop( + proxy_port: int = typer.Option(proxy_port, "--proxy-port", "-p", help="Caddy admin port"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format: text, json"), + dry_run: bool = typer.Option(False, "--dry-run", help="Dry run"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Stop Caddy proxy""" + logger = Logger(verbose=verbose) + + try: + config = StopConfig(proxy_port=proxy_port, verbose=verbose, output=output, dry_run=dry_run) + stop_service = Stop(logger=logger) + + with TimeoutWrapper(timeout): + result = stop_service.stop(config) + + output_text = stop_service.format_output(result, output) + if result.success: + logger.success(output_text) + else: + logger.error(output_text) + raise typer.Exit(1) + + except TimeoutError: + logger.error(operation_timed_out.format(timeout=timeout)) + raise typer.Exit(1) + except ValueError as e: + logger.error(str(e)) + raise typer.Exit(1) + except Exception as e: + if not isinstance(e, typer.Exit): + logger.error(unexpected_error.format(error=str(e))) + raise typer.Exit(1) diff --git a/cli/app/commands/proxy/load.py b/cli/app/commands/proxy/load.py new file mode 100644 index 00000000..0b1f7314 --- /dev/null +++ b/cli/app/commands/proxy/load.py @@ -0,0 +1,134 @@ +import os +from typing 
import Optional, Protocol + +from pydantic import Field, field_validator + +from app.utils.config import Config, PROXY_PORT +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseCaddyCommandBuilder, BaseCaddyService, BaseConfig, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_config_file, + dry_run_mode, + dry_run_port, + end_dry_run, +) + +config = Config() +proxy_port = config.get_yaml_value(PROXY_PORT) + + +class CaddyServiceProtocol(Protocol): + def load_config(self, config_file: str, port: int = proxy_port) -> tuple[bool, str]: ... + + +class CaddyCommandBuilder(BaseCaddyCommandBuilder): + @staticmethod + def build_load_command(config_file: str, port: int = proxy_port) -> list[str]: + return BaseCaddyCommandBuilder.build_load_command(config_file, port) + + +class LoadFormatter(BaseFormatter): + def format_output(self, result: "LoadResult", output: str) -> str: + if output == "json": + success_msg = "Configuration loaded successfully" if result.success else "Failed to load configuration" + return super().format_output(result, output, success_msg, result.error or "Unknown error") + + if result.success: + return "Configuration loaded successfully" + else: + return result.error or "Failed to load configuration" + + def format_dry_run(self, config: "LoadConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "port": dry_run_port, + "config_file": dry_run_config_file, + "end": end_dry_run, + } + return super().format_dry_run(config, CaddyCommandBuilder(), dry_run_messages) + + +class CaddyService(BaseCaddyService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger) + + def load_config_file(self, config_file: str, port: int = proxy_port) -> tuple[bool, str]: + return self.load_config(config_file, port) + + +class LoadResult(BaseResult): + config_file: Optional[str] + + +class LoadConfig(BaseConfig): + config_file: Optional[str] = Field(None, description="Path to Caddy config file") + + @field_validator("config_file") + @classmethod + def validate_config_file(cls, config_file: str) -> Optional[str]: + if not config_file: + return None + stripped_config_file = config_file.strip() + if not stripped_config_file: + return None + if not os.path.exists(stripped_config_file): + raise ValueError(f"Configuration file not found: {stripped_config_file}") + return stripped_config_file + + +class LoadService(BaseService[LoadConfig, LoadResult]): + def __init__(self, config: LoadConfig, logger: LoggerProtocol = None, caddy_service: CaddyServiceProtocol = None): + super().__init__(config, logger, caddy_service) + self.caddy_service = caddy_service or CaddyService(self.logger) + self.formatter = LoadFormatter() + + def _create_result(self, success: bool, error: str = None) -> LoadResult: + return LoadResult( + proxy_port=self.config.proxy_port, + config_file=self.config.config_file, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + ) + + def load(self) -> LoadResult: + return self.execute() + + def execute(self) -> LoadResult: + if not self.config.config_file: + return self._create_result(False, "Configuration file is required") + + success, message = self.caddy_service.load_config_file(self.config.config_file, self.config.proxy_port) + return self._create_result(success, None if success else message) + + def 
load_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Load(BaseAction[LoadConfig, LoadResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = LoadFormatter() + + def load(self, config: LoadConfig) -> LoadResult: + return self.execute(config) + + def execute(self, config: LoadConfig) -> LoadResult: + service = LoadService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: LoadResult, output: str) -> str: + return self.formatter.format_output(result, output) diff --git a/cli/app/commands/proxy/messages.py b/cli/app/commands/proxy/messages.py new file mode 100644 index 00000000..86122f64 --- /dev/null +++ b/cli/app/commands/proxy/messages.py @@ -0,0 +1,56 @@ +dry_run_mode = "🔍 DRY RUN MODE" +dry_run_command_would_be_executed = "The following command would be executed:" +dry_run_command = "Command:" +dry_run_port = "Port:" +dry_run_config_file = "Config file:" +end_dry_run = "--- End of dry run ---" +proxy_initialized_successfully = "Caddy proxy initialized successfully on port {port}" +proxy_status_running = "Caddy proxy is running on port {port}" +proxy_reloaded_successfully = "Caddy proxy configuration reloaded successfully on port {port}" +proxy_stopped_successfully = "Caddy proxy stopped successfully on port {port}" +proxy_init_failed = "Failed to initialize Caddy proxy" +proxy_status_stopped = "Caddy proxy is not running on port {port}" +proxy_status_failed = "Failed to check Caddy proxy status" +proxy_reload_failed = "Failed to reload Caddy proxy configuration" +proxy_stop_failed = "Failed to stop Caddy proxy" +config_file_required = "Configuration file is required" +config_file_not_found = "Configuration file not found: {file}" +invalid_json_config = "Invalid JSON in configuration file: {error}" +port_must_be_between_1_and_65535 = "Port must be between 1 and 65535" +caddy_connection_failed = "Failed to connect to Caddy: {error}" +caddy_status_code_error = "Caddy returned status code: {code}" +caddy_load_failed = "Failed to load configuration: {code} - {response}" +debug_init_proxy = "Initializing Caddy proxy on port: {port}" +debug_check_status = "Checking Caddy proxy status on port: {port}" +debug_reload_config = "Reloading Caddy proxy configuration on port: {port}" +debug_stop_proxy = "Stopping Caddy proxy on port: {port}" +info_caddy_running = "Caddy is running" +info_config_loaded = "Configuration loaded successfully" +info_caddy_stopped = "Caddy stopped successfully" +debug_checking_caddy_status = "Checking Caddy status: GET {url}" +debug_caddy_response = "Caddy response: {code}" +debug_caddy_config_accessible = "Caddy config endpoint accessible" +debug_caddy_non_200 = "Caddy returned non-200 status: {code}" +debug_connection_refused = "Connection refused to Caddy admin API on port {port}" +debug_request_failed = "Request failed: {error}" +debug_unexpected_error = "Unexpected error during status check: {error}" +debug_loading_config_file = "Loading config file: {file}" +debug_config_parsed = "Config file parsed successfully" +debug_posting_config = "Posting config to Caddy: POST {url}" +debug_caddy_load_response = "Caddy load response: {code}" +debug_config_loaded_success = "Configuration loaded successfully" +debug_caddy_load_failed = "Caddy load failed: 
{error}" +debug_stopping_caddy = "Stopping Caddy: POST {url}" +debug_caddy_stop_response = "Caddy stop response: {code}" +debug_caddy_stopped_success = "Caddy stopped successfully" +debug_caddy_stop_failed = "Caddy stop failed: {error}" +caddy_is_running = "Caddy is running" +caddy_not_running = "Caddy not running" +config_file_required_error = "Configuration file is required" +config_file_not_found_error = "Config file not found: {file}" +invalid_json_error = "Invalid JSON: {error}" +cannot_connect_to_caddy = "Cannot connect to Caddy admin API on port {port}" +request_failed_error = "Request failed: {error}" +http_error = "HTTP {code}" +operation_timed_out = "Operation timed out after {timeout} seconds" +unexpected_error = "Unexpected error: {error}" diff --git a/cli/app/commands/proxy/status.py b/cli/app/commands/proxy/status.py new file mode 100644 index 00000000..a7072c66 --- /dev/null +++ b/cli/app/commands/proxy/status.py @@ -0,0 +1,112 @@ +from typing import Protocol + +from app.utils.config import Config, PROXY_PORT +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseCaddyCommandBuilder, BaseCaddyService, BaseConfig, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_mode, + dry_run_port, + end_dry_run, +) + +config = Config() +proxy_port = config.get_yaml_value(PROXY_PORT) + +class CaddyServiceProtocol(Protocol): + def check_status(self, port: int = proxy_port) -> tuple[bool, str]: ... + + +class CaddyCommandBuilder(BaseCaddyCommandBuilder): + @staticmethod + def build_status_command(port: int = proxy_port) -> list[str]: + return BaseCaddyCommandBuilder.build_status_command(port) + + +class StatusFormatter(BaseFormatter): + def format_output(self, result: "StatusResult", output: str) -> str: + if output == "json": + status_msg = "Caddy is running" if result.success else (result.error or "Caddy not running") + return super().format_output(result, output, status_msg, result.error or "Caddy not running") + + if result.success: + return "Caddy is running" + else: + return result.error or "Caddy not running" + + def format_dry_run(self, config: "StatusConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "port": dry_run_port, + "end": end_dry_run, + } + return super().format_dry_run(config, CaddyCommandBuilder(), dry_run_messages) + + +class CaddyService(BaseCaddyService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger) + + def get_status(self, port: int = proxy_port) -> tuple[bool, str]: + return self.check_status(port) + + +class StatusResult(BaseResult): + pass + + +class StatusConfig(BaseConfig): + pass + + +class StatusService(BaseService[StatusConfig, StatusResult]): + def __init__(self, config: StatusConfig, logger: LoggerProtocol = None, caddy_service: CaddyServiceProtocol = None): + super().__init__(config, logger, caddy_service) + self.caddy_service = caddy_service or CaddyService(self.logger) + self.formatter = StatusFormatter() + + def _create_result(self, success: bool, error: str = None) -> StatusResult: + return StatusResult( + proxy_port=self.config.proxy_port, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + ) + + def status(self) -> StatusResult: + return self.execute() + + def execute(self) -> StatusResult: + success, message = self.caddy_service.get_status(self.config.proxy_port) 
+ return self._create_result(success, None if success else message) + + def status_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Status(BaseAction[StatusConfig, StatusResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = StatusFormatter() + + def status(self, config: StatusConfig) -> StatusResult: + return self.execute(config) + + def execute(self, config: StatusConfig) -> StatusResult: + service = StatusService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: StatusResult, output: str) -> str: + return self.formatter.format_output(result, output) diff --git a/cli/app/commands/proxy/stop.py b/cli/app/commands/proxy/stop.py new file mode 100644 index 00000000..24f7202d --- /dev/null +++ b/cli/app/commands/proxy/stop.py @@ -0,0 +1,120 @@ +from typing import Protocol + +from pydantic import BaseModel + +from app.utils.config import Config, PROXY_PORT +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseCaddyCommandBuilder, BaseCaddyService, BaseConfig, BaseFormatter, BaseResult, BaseService +from .messages import ( + debug_stop_proxy, + dry_run_command, + dry_run_command_would_be_executed, + dry_run_mode, + dry_run_port, + end_dry_run, + proxy_stop_failed, + proxy_stopped_successfully, +) + +config = Config() +proxy_port = config.get_yaml_value(PROXY_PORT) + + +class CaddyServiceProtocol(Protocol): + def stop_proxy(self, port: int = proxy_port) -> tuple[bool, str]: ... 
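Reviewer note (illustrative sketch, not part of this diff): the Stop action defined below is meant to be driven from proxy/command.py; a minimal usage sketch, assuming the package imports as `app.commands.proxy.stop` and that Caddy's admin API listens on the assumed port 2019:

```python
# Editor's sketch only; the import path and the 2019 admin port are assumptions.
from app.utils.logger import Logger
from app.commands.proxy.stop import Stop, StopConfig

logger = Logger(verbose=True)
config = StopConfig(proxy_port=2019, verbose=True, output="text")

action = Stop(logger=logger)
result = action.stop(config)                 # POSTs to the configured Caddy stop endpoint
print(action.format_output(result, "text"))  # "Caddy stopped successfully" or the error message
```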
+ + +class CaddyCommandBuilder(BaseCaddyCommandBuilder): + @staticmethod + def build_stop_command(port: int = proxy_port) -> list[str]: + return BaseCaddyCommandBuilder.build_stop_command(port) + + +class StopFormatter(BaseFormatter): + def format_output(self, result: "StopResult", output: str) -> str: + if output == "json": + success_msg = "Caddy stopped successfully" if result.success else "Failed to stop Caddy" + return super().format_output(result, output, success_msg, result.error or "Unknown error") + + if result.success: + return "Caddy stopped successfully" + else: + return result.error or "Failed to stop Caddy" + + def format_dry_run(self, config: "StopConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "port": dry_run_port, + "end": end_dry_run, + } + return super().format_dry_run(config, CaddyCommandBuilder(), dry_run_messages) + + +class CaddyService(BaseCaddyService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger) + + def stop_caddy(self, port: int = proxy_port) -> tuple[bool, str]: + return self.stop_proxy(port) + + +class StopResult(BaseResult): + pass + + +class StopConfig(BaseConfig): + pass + + +class StopService(BaseService[StopConfig, StopResult]): + def __init__(self, config: StopConfig, logger: LoggerProtocol = None, caddy_service: CaddyServiceProtocol = None): + super().__init__(config, logger, caddy_service) + self.caddy_service = caddy_service or CaddyService(self.logger) + self.formatter = StopFormatter() + + def _create_result(self, success: bool, error: str = None) -> StopResult: + return StopResult( + proxy_port=self.config.proxy_port, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + ) + + def stop(self) -> StopResult: + return self.execute() + + def execute(self) -> StopResult: + success, message = self.caddy_service.stop_caddy(self.config.proxy_port) + return self._create_result(success, None if success else message) + + def stop_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Stop(BaseAction[StopConfig, StopResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = StopFormatter() + + def stop(self, config: StopConfig) -> StopResult: + return self.execute(config) + + def execute(self, config: StopConfig) -> StopResult: + service = StopService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: StopResult, output: str) -> str: + return self.formatter.format_output(result, output) diff --git a/cli/app/commands/service/__init__.py b/cli/app/commands/service/__init__.py new file mode 100644 index 00000000..cbcf0995 --- /dev/null +++ b/cli/app/commands/service/__init__.py @@ -0,0 +1 @@ +# Service package diff --git a/cli/app/commands/service/base.py b/cli/app/commands/service/base.py new file mode 100644 index 00000000..3ee5e38b --- /dev/null +++ b/cli/app/commands/service/base.py @@ -0,0 +1,256 @@ +import os +import subprocess +from typing import Generic, Optional, Protocol, TypeVar + +from pydantic import BaseModel, Field, field_validator + +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.protocols 
import LoggerProtocol +from .messages import ( + service_action_info, + service_action_success, + service_action_failed, + service_action_unexpected_error, + environment_file_not_found, + compose_file_not_found, + docker_command_executing, + docker_command_completed, + docker_command_failed, + docker_command_stdout, + docker_command_stderr, + docker_unexpected_error, +) + +TConfig = TypeVar("TConfig", bound=BaseModel) +TResult = TypeVar("TResult", bound=BaseModel) + + +class DockerServiceProtocol(Protocol): + def execute_services( + self, name: str = "all", env_file: str = None, compose_file: str = None, **kwargs + ) -> tuple[bool, str]: ... + + +class BaseDockerCommandBuilder: + @staticmethod + def build_command(action: str, name: str = "all", env_file: str = None, compose_file: str = None, **kwargs) -> list[str]: + cmd = ["docker", "compose"] + if compose_file: + cmd.extend(["-f", compose_file]) + cmd.append(action) + + if action == "up" and kwargs.get("detach", False): + cmd.append("-d") + + if env_file: + cmd.extend(["--env-file", env_file]) + + if name != "all": + cmd.append(name) + + return cmd + + +class BaseFormatter: + def __init__(self): + self.output_formatter = OutputFormatter() + + def format_output(self, result: TResult, output: str, success_message: str, error_message: str) -> str: + if result.success: + if output == "json": + # For JSON, return formatted data structure + message = success_message.format(services=result.name) + output_message = self.output_formatter.create_success_message(message, result.model_dump()) + return self.output_formatter.format_output(output_message, output) + else: + # For text format, return only docker output or empty (command.py handles success message) + if result.verbose and result.docker_output and result.docker_output.strip(): + return f"Docker Command Output:\n{result.docker_output.strip()}" + return "" + else: + # Always format errors the same way + error = result.error or "Unknown error occurred" + output_message = self.output_formatter.create_error_message(error, result.model_dump()) + return self.output_formatter.format_output(output_message, output) + + def format_dry_run(self, config: TConfig, command_builder, dry_run_messages: dict) -> str: + if hasattr(command_builder, "build_up_command"): + cmd = command_builder.build_up_command( + getattr(config, "name", "all"), + getattr(config, "detach", True), + getattr(config, "env_file", None), + getattr(config, "compose_file", None), + ) + elif hasattr(command_builder, "build_down_command"): + cmd = command_builder.build_down_command( + getattr(config, "name", "all"), getattr(config, "env_file", None), getattr(config, "compose_file", None) + ) + elif hasattr(command_builder, "build_ps_command"): + cmd = command_builder.build_ps_command( + getattr(config, "name", "all"), getattr(config, "env_file", None), getattr(config, "compose_file", None) + ) + elif hasattr(command_builder, "build_restart_command"): + cmd = command_builder.build_restart_command( + getattr(config, "name", "all"), getattr(config, "env_file", None), getattr(config, "compose_file", None) + ) + else: + cmd = command_builder.build_command(config) + + output = [] + output.append(dry_run_messages["mode"]) + output.append(dry_run_messages["command_would_be_executed"]) + output.append(f"{dry_run_messages['command']} {' '.join(cmd)}") + output.append(f"{dry_run_messages['service']} {getattr(config, 'name', 'all')}") + + if hasattr(config, "detach"): + output.append(f"{dry_run_messages.get('detach_mode', 'Detach mode:')} 
{getattr(config, 'detach', True)}") + + if getattr(config, "env_file", None): + output.append(f"{dry_run_messages['env_file']} {getattr(config, 'env_file')}") + + output.append(dry_run_messages["end"]) + return "\n".join(output) + + +class BaseDockerService: + def __init__(self, logger: LoggerProtocol, action: str): + self.logger = logger + self.action = action + + def execute_services( + self, name: str = "all", env_file: str = None, compose_file: str = None, **kwargs + ) -> tuple[bool, str]: + cmd = BaseDockerCommandBuilder.build_command(self.action, name, env_file, compose_file, **kwargs) + + self.logger.debug(docker_command_executing.format(command=' '.join(cmd))) + + try: + self.logger.debug(service_action_info.format(action=self.action, name=name)) + + if self.action == "up" and not kwargs.get("detach", False): + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, universal_newlines=True) + + output_lines = [] + self.logger.debug("Docker container logs:") + self.logger.debug("-" * 50) + + for line in process.stdout: + self.logger.debug(line.rstrip()) # Stream logs through logger + output_lines.append(line.rstrip()) + + return_code = process.wait() + + full_output = '\n'.join(output_lines) + + if return_code == 0: + self.logger.debug(docker_command_completed.format(action=self.action)) + if full_output.strip(): + self.logger.debug(docker_command_stdout.format(output=full_output.strip())) + return True, full_output + else: + self.logger.debug(docker_command_failed.format(return_code=return_code)) + if full_output.strip(): + self.logger.debug(docker_command_stderr.format(output=full_output.strip())) + self.logger.error(service_action_failed.format(action=self.action, error=full_output or f"Process exited with code {return_code}")) + return False, full_output or f"Process exited with code {return_code}" + else: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + self.logger.debug(docker_command_completed.format(action=self.action)) + + if result.stdout.strip(): + self.logger.debug(docker_command_stdout.format(output=result.stdout.strip())) + + if result.stderr.strip(): + self.logger.debug(docker_command_stderr.format(output=result.stderr.strip())) + + return True, result.stdout or result.stderr + + except subprocess.CalledProcessError as e: + self.logger.debug(docker_command_failed.format(return_code=e.returncode)) + + if e.stdout and e.stdout.strip(): + self.logger.debug(docker_command_stdout.format(output=e.stdout.strip())) + + if e.stderr and e.stderr.strip(): + self.logger.debug(docker_command_stderr.format(output=e.stderr.strip())) + + self.logger.error(service_action_failed.format(action=self.action, error=e.stderr or str(e))) + return False, e.stderr or e.stdout or str(e) + except Exception as e: + self.logger.debug(docker_unexpected_error.format(action=self.action, error=str(e))) + self.logger.error(service_action_unexpected_error.format(action=self.action, error=e)) + return False, str(e) + + +class BaseConfig(BaseModel): + name: str = Field("all", description="Name of the service") + env_file: Optional[str] = Field(None, description="Path to environment file") + verbose: bool = Field(False, description="Verbose output") + output: str = Field("text", description="Output format: text, json") + dry_run: bool = Field(False, description="Dry run mode") + compose_file: Optional[str] = Field(None, description="Path to the compose file") + + @field_validator("env_file") + @classmethod + def 
validate_env_file(cls, env_file: str) -> Optional[str]: + if not env_file: + return None + stripped_env_file = env_file.strip() + if not stripped_env_file: + return None + if not os.path.exists(stripped_env_file): + raise ValueError(environment_file_not_found.format(path=stripped_env_file)) + return stripped_env_file + + @field_validator("compose_file") + @classmethod + def validate_compose_file(cls, compose_file: str) -> Optional[str]: + if not compose_file: + return None + stripped_compose_file = compose_file.strip() + if not stripped_compose_file: + return None + if not os.path.exists(stripped_compose_file): + raise ValueError(compose_file_not_found.format(path=stripped_compose_file)) + return stripped_compose_file + + +class BaseResult(BaseModel): + name: str + env_file: Optional[str] + verbose: bool + output: str + success: bool = False + error: Optional[str] = None + docker_output: Optional[str] = None + + +class BaseService(Generic[TConfig, TResult]): + def __init__(self, config: TConfig, logger: LoggerProtocol = None, docker_service: DockerServiceProtocol = None): + self.config = config + self.logger = logger or Logger(verbose=config.verbose) + self.docker_service = docker_service + self.formatter = None + + def _create_result(self, success: bool, error: str = None) -> TResult: + raise NotImplementedError + + def execute(self) -> TResult: + raise NotImplementedError + + def execute_and_format(self) -> str: + raise NotImplementedError + + +class BaseAction(Generic[TConfig, TResult]): + def __init__(self, logger: LoggerProtocol = None): + self.logger = logger + self.formatter = None + + def execute(self, config: TConfig) -> TResult: + raise NotImplementedError + + def format_output(self, result: TResult, output: str) -> str: + raise NotImplementedError diff --git a/cli/app/commands/service/command.py b/cli/app/commands/service/command.py new file mode 100644 index 00000000..864814c7 --- /dev/null +++ b/cli/app/commands/service/command.py @@ -0,0 +1,214 @@ +import json +import typer + +from app.utils.config import Config, DEFAULT_COMPOSE_FILE, NIXOPUS_CONFIG_DIR +from app.utils.logger import Logger +from app.utils.output_formatter import OutputFormatter +from app.utils.timeout import TimeoutWrapper + +from .down import Down, DownConfig +from .messages import services_started_successfully, services_stopped_successfully, services_status_retrieved, services_restarted_successfully +from .ps import Ps, PsConfig +from .restart import Restart, RestartConfig +from .up import Up, UpConfig + +service_app = typer.Typer(help="Manage Nixopus services") + +config = Config() +nixopus_config_dir = config.get_yaml_value(NIXOPUS_CONFIG_DIR) +compose_file = config.get_yaml_value(DEFAULT_COMPOSE_FILE) +compose_file_path = nixopus_config_dir + "/" + compose_file + + +@service_app.command() +def up( + name: str = typer.Option("all", "--name", "-n", help="The name of the service to start, defaults to all"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", help="Dry run"), + detach: bool = typer.Option(False, "--detach", "-d", help="Detach from the service and run in the background"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + compose_file: str = typer.Option(compose_file_path, "--compose-file", "-f", help="Path to the compose file"), + timeout: int = typer.Option(10, "--timeout", 
"-t", help="Timeout in seconds"), +): + """Start Nixopus services""" + logger = Logger(verbose=verbose) + + try: + config = UpConfig( + name=name, + detach=detach, + env_file=env_file, + verbose=verbose, + output=output, + dry_run=dry_run, + compose_file=compose_file, + ) + + up_service = Up(logger=logger) + + with TimeoutWrapper(timeout): + if config.dry_run: + formatted_output = up_service.format_dry_run(config) + logger.info(formatted_output) + return + else: + result = up_service.up(config) + + if result.success: + formatted_output = up_service.format_output(result, output) + if output == "json": + logger.info(formatted_output) + else: + logger.success(services_started_successfully.format(services=result.name)) + if formatted_output: + logger.info(formatted_output) + else: + logger.error(result.error if result.error is not None else "Unknown error") + raise typer.Exit(1) + + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(str(e)) + raise typer.Exit(1) + + +@service_app.command() +def down( + name: str = typer.Option("all", "--name", "-n", help="The name of the service to stop, defaults to all"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + compose_file: str = typer.Option(compose_file_path, "--compose-file", "-f", help="Path to the compose file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Stop Nixopus services""" + logger = Logger(verbose=verbose) + + try: + config = DownConfig( + name=name, env_file=env_file, verbose=verbose, output=output, dry_run=dry_run, compose_file=compose_file + ) + + down_service = Down(logger=logger) + + with TimeoutWrapper(timeout): + if config.dry_run: + formatted_output = down_service.format_dry_run(config) + logger.info(formatted_output) + return + else: + result = down_service.down(config) + + if result.success: + formatted_output = down_service.format_output(result, output) + if output == "json": + logger.info(formatted_output) + else: + logger.success(services_stopped_successfully.format(services=result.name)) + if formatted_output: + logger.info(formatted_output) + else: + logger.error(result.error) + raise typer.Exit(1) + + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(str(e)) + raise typer.Exit(1) + + +@service_app.command() +def ps( + name: str = typer.Option("all", "--name", "-n", help="The name of the service to show, defaults to all"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + compose_file: str = typer.Option(compose_file_path, "--compose-file", "-f", help="Path to the compose file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Show status of Nixopus services""" + logger = Logger(verbose=verbose) + + try: + config = PsConfig( + name=name, env_file=env_file, verbose=verbose, output=output, dry_run=dry_run, compose_file=compose_file + ) + + ps_service = 
Ps(logger=logger) + + with TimeoutWrapper(timeout): + if config.dry_run: + formatted_output = ps_service.format_dry_run(config) + logger.info(formatted_output) + return + else: + result = ps_service.ps(config) + + if result.success: + formatted_output = ps_service.format_output(result, output) + logger.info(formatted_output) + else: + logger.error(result.error) + raise typer.Exit(1) + + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(str(e)) + raise typer.Exit(1) + + +@service_app.command() +def restart( + name: str = typer.Option("all", "--name", "-n", help="The name of the service to restart, defaults to all"), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), + output: str = typer.Option("text", "--output", "-o", help="Output format, text, json"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Dry run"), + env_file: str = typer.Option(None, "--env-file", "-e", help="Path to the environment file"), + compose_file: str = typer.Option(compose_file_path, "--compose-file", "-f", help="Path to the compose file"), + timeout: int = typer.Option(10, "--timeout", "-t", help="Timeout in seconds"), +): + """Restart Nixopus services""" + logger = Logger(verbose=verbose) + + try: + config = RestartConfig( + name=name, env_file=env_file, verbose=verbose, output=output, dry_run=dry_run, compose_file=compose_file + ) + + restart_service = Restart(logger=logger) + + with TimeoutWrapper(timeout): + if config.dry_run: + formatted_output = restart_service.format_dry_run(config) + logger.info(formatted_output) + return + else: + result = restart_service.restart(config) + + if result.success: + formatted_output = restart_service.format_output(result, output) + if output == "json": + logger.info(formatted_output) + else: + logger.success(services_restarted_successfully.format(services=result.name)) + if formatted_output: + logger.info(formatted_output) + else: + logger.error(result.error) + raise typer.Exit(1) + + except TimeoutError as e: + logger.error(e) + raise typer.Exit(1) + except Exception as e: + logger.error(str(e)) + raise typer.Exit(1) diff --git a/cli/app/commands/service/down.py b/cli/app/commands/service/down.py new file mode 100644 index 00000000..ccc21b61 --- /dev/null +++ b/cli/app/commands/service/down.py @@ -0,0 +1,120 @@ +import os +import subprocess +from typing import Optional, Protocol + +from pydantic import BaseModel, Field, field_validator + +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseDockerCommandBuilder, BaseDockerService, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_stop_failed, + services_stopped_successfully, +) + + +class DockerServiceProtocol(Protocol): + def stop_services(self, name: str = "all", env_file: str = None, compose_file: str = None) -> tuple[bool, str]: ... 
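Reviewer note (illustrative sketch, not part of this diff): the Down action defined below backs the `service down` command; a minimal dry-run sketch, assuming the package imports as `app.commands.service.down`:

```python
# Editor's sketch only; the import path is an assumption based on the cli/ layout.
from app.utils.logger import Logger
from app.commands.service.down import Down, DownConfig

logger = Logger(verbose=False)
config = DownConfig(name="all", output="text", dry_run=True)  # defaults: no env file, no explicit compose file

action = Down(logger=logger)
print(action.format_dry_run(config))  # prints the "docker compose down" command without executing it
# To actually stop services, call action.down(DownConfig(name="all")) and format_output() on the result.
```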
+ + +class DockerCommandBuilder(BaseDockerCommandBuilder): + @staticmethod + def build_down_command(name: str = "all", env_file: str = None, compose_file: str = None) -> list[str]: + return BaseDockerCommandBuilder.build_command("down", name, env_file, compose_file) + + +class DownFormatter(BaseFormatter): + def format_output(self, result: "DownResult", output: str) -> str: + return super().format_output(result, output, services_stopped_successfully, service_stop_failed) + + def format_dry_run(self, config: "DownConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "service": dry_run_service, + "env_file": dry_run_env_file, + "end": end_dry_run, + } + return super().format_dry_run(config, DockerCommandBuilder(), dry_run_messages) + + +class DockerService(BaseDockerService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger, "down") + + def stop_services(self, name: str = "all", env_file: str = None, compose_file: str = None) -> tuple[bool, str]: + return self.execute_services(name, env_file, compose_file) + + +class DownResult(BaseResult): + pass + + +class DownConfig(BaseConfig): + pass + + +class DownService(BaseService[DownConfig, DownResult]): + def __init__(self, config: DownConfig, logger: LoggerProtocol = None, docker_service: DockerServiceProtocol = None): + super().__init__(config, logger, docker_service) + self.docker_service = docker_service or DockerService(self.logger) + self.formatter = DownFormatter() + + def _create_result(self, success: bool, error: str = None, docker_output: str = None) -> DownResult: + return DownResult( + name=self.config.name, + env_file=self.config.env_file, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + docker_output=docker_output, + ) + + def down(self) -> DownResult: + return self.execute() + + def execute(self) -> DownResult: + self.logger.debug(f"Stopping services: {self.config.name}") + + success, docker_output = self.docker_service.stop_services(self.config.name, self.config.env_file, self.config.compose_file) + + error = None if success else docker_output + return self._create_result(success, error, docker_output) + + def down_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Down(BaseAction[DownConfig, DownResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = DownFormatter() + + def down(self, config: DownConfig) -> DownResult: + return self.execute(config) + + def execute(self, config: DownConfig) -> DownResult: + service = DownService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: DownResult, output: str) -> str: + return self.formatter.format_output(result, output) + + def format_dry_run(self, config: DownConfig) -> str: + return self.formatter.format_dry_run(config) diff --git a/cli/app/commands/service/messages.py b/cli/app/commands/service/messages.py new file mode 100644 index 00000000..b940ea67 --- /dev/null +++ b/cli/app/commands/service/messages.py @@ -0,0 +1,38 @@ +dry_run_mode = "=== DRY RUN MODE ===" +dry_run_command_would_be_executed = "The following commands would be executed:" +dry_run_command = "Command:" +dry_run_service = 
"Service:" +dry_run_detach_mode = "Detach mode:" +dry_run_env_file = "Environment file:" +end_dry_run = "=== END DRY RUN ===" +starting_services = "Starting services: {services}" +services_started_successfully = "Services started successfully: {services}" +service_start_failed = "Service start failed: {error}" +unexpected_error_during_start = "Unexpected error during start: {error}" +stopping_services = "Stopping services: {services}" +services_stopped_successfully = "Services stopped successfully: {services}" +service_stop_failed = "Service stop failed: {error}" +unexpected_error_during_stop = "Unexpected error during stop: {error}" +checking_services = "Checking status of services: {services}" +services_status_retrieved = "Services status retrieved successfully: {services}" +service_status_failed = "Service status check failed: {error}" +unexpected_error_during_status = "Unexpected error during status check: {error}" +restarting_services = "Restarting services: {services}" +services_restarted_successfully = "Services restarted successfully: {services}" +service_restart_failed = "Service restart failed: {error}" +unexpected_error_during_restart = "Unexpected error during restart: {error}" +unknown_error = "Unknown error occurred" +service_action_info = "{action} services: {name}" +service_action_success = "Service {action} successful: {name}" +service_action_failed = "Service {action} failed: {error}" +service_action_unexpected_error = "Unexpected error during {action}: {error}" +environment_file_not_found = "Environment file not found: {path}" +compose_file_not_found = "Compose file not found: {path}" +docker_command_executing = "Executing Docker command: {command}" +docker_command_completed = "Docker command completed successfully for {action} action" +docker_command_failed = "Docker command failed with return code {return_code}" +docker_command_stdout = "Docker command stdout: {output}" +docker_command_stderr = "Docker command stderr: {output}" +docker_unexpected_error = "Unexpected error during {action} action: {error}" +command_output_label = "Command output: {output}" +command_error_label = "Command error: {output}" \ No newline at end of file diff --git a/cli/app/commands/service/ps.py b/cli/app/commands/service/ps.py new file mode 100644 index 00000000..5b6eccce --- /dev/null +++ b/cli/app/commands/service/ps.py @@ -0,0 +1,216 @@ +import json +import subprocess + +from app.utils.protocols import DockerServiceProtocol, LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseDockerCommandBuilder, BaseDockerService, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_status_failed, + services_status_retrieved, + docker_command_executing, + docker_command_completed, + docker_command_failed, + docker_command_stdout, + docker_command_stderr, + docker_unexpected_error, + service_action_info, + service_action_failed, + service_action_unexpected_error, +) + + +class DockerCommandBuilder(BaseDockerCommandBuilder): + @staticmethod + def build_ps_command(name: str = "all", env_file: str = None, compose_file: str = None) -> list[str]: + cmd = ["docker", "compose"] + if compose_file: + cmd.extend(["-f", compose_file]) + cmd.extend(["config", "--format", "json"]) + if env_file: + cmd.extend(["--env-file", env_file]) + return cmd + + +class PsFormatter(BaseFormatter): + def format_output(self, result: "PsResult", output: str) -> str: + if 
result.success: + if output == "json": + message = services_status_retrieved.format(services=result.name) + output_message = self.output_formatter.create_success_message(message, result.model_dump()) + return self.output_formatter.format_output(output_message, output) + else: + if result.docker_output and result.docker_output.strip(): + try: + config_data = json.loads(result.docker_output) + services = config_data.get("services", {}) + + if services: + table_data = [] + for service_name, service_config in services.items(): + ports = service_config.get("ports", []) + port_mappings = [] + for port in ports: + if isinstance(port, dict): + published = port.get("published", "") + target = port.get("target", "") + port_mappings.append(f"{published}:{target}") + else: + port_mappings.append(str(port)) + + networks = list(service_config.get("networks", {}).keys()) + + table_data.append({ + "Service": service_name, + "Image": service_config.get("image", ""), + "Ports": ", ".join(port_mappings) if port_mappings else "", + "Networks": ", ".join(networks) if networks else "default", + "Command": str(service_config.get("command", "")) if service_config.get("command") else "", + "Entrypoint": str(service_config.get("entrypoint", "")) if service_config.get("entrypoint") else "", + }) + + if result.name != "all": + table_data = [row for row in table_data if row["Service"] == result.name] + + if table_data: + headers = ["Service", "Image", "Ports", "Networks", "Command", "Entrypoint"] + return self.output_formatter.create_table( + data=table_data, + title="Docker Compose Services Configuration", + headers=headers, + show_header=True, + show_lines=True + ).strip() + else: + return f"No service found with name: {result.name}" if result.name != "all" else "No services found" + else: + return "No services found in compose file" + except json.JSONDecodeError as e: + return result.docker_output.strip() + else: + return "No configuration found" + else: + return super().format_output(result, output, services_status_retrieved, service_status_failed) + + def format_dry_run(self, config: "PsConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "service": dry_run_service, + "env_file": dry_run_env_file, + "end": end_dry_run, + } + return super().format_dry_run(config, DockerCommandBuilder(), dry_run_messages) + + +class DockerService(BaseDockerService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger, "config") + + def show_services_status(self, name: str = "all", env_file: str = None, compose_file: str = None) -> tuple[bool, str]: + cmd = DockerCommandBuilder.build_ps_command(name, env_file, compose_file) + + self.logger.debug(docker_command_executing.format(command=' '.join(cmd))) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + + self.logger.debug(docker_command_completed.format(action="ps")) + + if result.stdout.strip(): + self.logger.debug(docker_command_stdout.format(output=result.stdout.strip())) + + if result.stderr.strip(): + self.logger.debug(docker_command_stderr.format(output=result.stderr.strip())) + + return True, result.stdout or result.stderr + + except subprocess.CalledProcessError as e: + self.logger.debug(docker_command_failed.format(return_code=e.returncode)) + + if e.stdout and e.stdout.strip(): + self.logger.debug(docker_command_stdout.format(output=e.stdout.strip())) + + if e.stderr and e.stderr.strip(): + 
self.logger.debug(docker_command_stderr.format(output=e.stderr.strip())) + + self.logger.error(service_action_failed.format(action="ps", error=e.stderr or str(e))) + return False, e.stderr or e.stdout or str(e) + except Exception as e: + self.logger.debug(docker_unexpected_error.format(action="ps", error=str(e))) + self.logger.error(service_action_unexpected_error.format(action="ps", error=e)) + return False, str(e) + + +class PsResult(BaseResult): + pass + + +class PsConfig(BaseConfig): + pass + + +class PsService(BaseService[PsConfig, PsResult]): + def __init__(self, config: PsConfig, logger: LoggerProtocol = None, docker_service: DockerServiceProtocol = None): + super().__init__(config, logger, docker_service) + self.docker_service = docker_service or DockerService(self.logger) + self.formatter = PsFormatter() + + def _create_result(self, success: bool, error: str = None, docker_output: str = None) -> PsResult: + return PsResult( + name=self.config.name, + env_file=self.config.env_file, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + docker_output=docker_output, + ) + + def ps(self) -> PsResult: + return self.execute() + + def execute(self) -> PsResult: + self.logger.debug(f"Checking status of services: {self.config.name}") + + success, docker_output = self.docker_service.show_services_status( + self.config.name, self.config.env_file, self.config.compose_file + ) + + error = None if success else docker_output + return self._create_result(success, error, docker_output) + + def ps_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Ps(BaseAction[PsConfig, PsResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = PsFormatter() + + def ps(self, config: PsConfig) -> PsResult: + return self.execute(config) + + def execute(self, config: PsConfig) -> PsResult: + service = PsService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: PsResult, output: str) -> str: + return self.formatter.format_output(result, output) + + def format_dry_run(self, config: PsConfig) -> str: + return self.formatter.format_dry_run(config) diff --git a/cli/app/commands/service/restart.py b/cli/app/commands/service/restart.py new file mode 100644 index 00000000..2ae8ca37 --- /dev/null +++ b/cli/app/commands/service/restart.py @@ -0,0 +1,109 @@ +from app.utils.protocols import DockerServiceProtocol, LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseDockerCommandBuilder, BaseDockerService, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_restart_failed, + services_restarted_successfully, +) + + +class DockerCommandBuilder(BaseDockerCommandBuilder): + @staticmethod + def build_restart_command(name: str = "all", env_file: str = None, compose_file: str = None) -> list[str]: + return BaseDockerCommandBuilder.build_command("restart", name, env_file, compose_file) + + +class RestartFormatter(BaseFormatter): + def format_output(self, result: "RestartResult", output: str) -> str: + return super().format_output(result, output, services_restarted_successfully, service_restart_failed) + + def 
format_dry_run(self, config: "RestartConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "service": dry_run_service, + "env_file": dry_run_env_file, + "end": end_dry_run, + } + return super().format_dry_run(config, DockerCommandBuilder(), dry_run_messages) + + +class DockerService(BaseDockerService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger, "restart") + + def restart_services(self, name: str = "all", env_file: str = None, compose_file: str = None) -> tuple[bool, str]: + return self.execute_services(name, env_file, compose_file) + + +class RestartResult(BaseResult): + pass + + +class RestartConfig(BaseConfig): + pass + + +class RestartService(BaseService[RestartConfig, RestartResult]): + def __init__(self, config: RestartConfig, logger: LoggerProtocol = None, docker_service: DockerServiceProtocol = None): + super().__init__(config, logger, docker_service) + self.docker_service = docker_service or DockerService(self.logger) + self.formatter = RestartFormatter() + + def _create_result(self, success: bool, error: str = None, docker_output: str = None) -> RestartResult: + return RestartResult( + name=self.config.name, + env_file=self.config.env_file, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + docker_output=docker_output, + ) + + def restart(self) -> RestartResult: + return self.execute() + + def execute(self) -> RestartResult: + self.logger.debug(f"Restarting services: {self.config.name}") + + success, docker_output = self.docker_service.restart_services(self.config.name, self.config.env_file, self.config.compose_file) + + error = None if success else docker_output + return self._create_result(success, error, docker_output) + + def restart_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Restart(BaseAction[RestartConfig, RestartResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = RestartFormatter() + + def restart(self, config: RestartConfig) -> RestartResult: + return self.execute(config) + + def execute(self, config: RestartConfig) -> RestartResult: + service = RestartService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: RestartResult, output: str) -> str: + return self.formatter.format_output(result, output) + + def format_dry_run(self, config: RestartConfig) -> str: + return self.formatter.format_dry_run(config) diff --git a/cli/app/commands/service/up.py b/cli/app/commands/service/up.py new file mode 100644 index 00000000..819c1db7 --- /dev/null +++ b/cli/app/commands/service/up.py @@ -0,0 +1,126 @@ +from typing import Protocol + +from pydantic import Field + +from app.utils.protocols import LoggerProtocol + +from .base import BaseAction, BaseConfig, BaseDockerCommandBuilder, BaseDockerService, BaseFormatter, BaseResult, BaseService +from .messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_detach_mode, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_start_failed, + services_started_successfully, +) + + +class DockerServiceProtocol(Protocol): + def start_services( + self, name: str = "all", detach: bool = 
True, env_file: str = None, compose_file: str = None + ) -> tuple[bool, str]: ... + + +class DockerCommandBuilder(BaseDockerCommandBuilder): + @staticmethod + def build_up_command(name: str = "all", detach: bool = True, env_file: str = None, compose_file: str = None) -> list[str]: + return BaseDockerCommandBuilder.build_command("up", name, env_file, compose_file, detach=detach) + + +class UpFormatter(BaseFormatter): + def format_output(self, result: "UpResult", output: str) -> str: + return super().format_output(result, output, services_started_successfully, service_start_failed) + + def format_dry_run(self, config: "UpConfig") -> str: + dry_run_messages = { + "mode": dry_run_mode, + "command_would_be_executed": dry_run_command_would_be_executed, + "command": dry_run_command, + "service": dry_run_service, + "detach_mode": dry_run_detach_mode, + "env_file": dry_run_env_file, + "end": end_dry_run, + } + return super().format_dry_run(config, DockerCommandBuilder(), dry_run_messages) + + +class DockerService(BaseDockerService): + def __init__(self, logger: LoggerProtocol): + super().__init__(logger, "up") + + def start_services( + self, name: str = "all", detach: bool = False, env_file: str = None, compose_file: str = None + ) -> tuple[bool, str]: + return self.execute_services(name, env_file, compose_file, detach=detach) + + +class UpResult(BaseResult): + detach: bool + + +class UpConfig(BaseConfig): + detach: bool = Field(False, description="Run services in detached mode") + + +class UpService(BaseService[UpConfig, UpResult]): + def __init__(self, config: UpConfig, logger: LoggerProtocol = None, docker_service: DockerServiceProtocol = None): + super().__init__(config, logger, docker_service) + self.docker_service = docker_service or DockerService(self.logger) + self.formatter = UpFormatter() + + def _create_result(self, success: bool, error: str = None, docker_output: str = None) -> UpResult: + return UpResult( + name=self.config.name, + detach=self.config.detach, + env_file=self.config.env_file, + verbose=self.config.verbose, + output=self.config.output, + success=success, + error=error, + docker_output=docker_output, + ) + + def up(self) -> UpResult: + return self.execute() + + def execute(self) -> UpResult: + self.logger.debug(f"Starting services: {self.config.name}") + + success, docker_output = self.docker_service.start_services( + self.config.name, self.config.detach, self.config.env_file, self.config.compose_file + ) + + error = None if success else docker_output + return self._create_result(success, error, docker_output) + + def up_and_format(self) -> str: + return self.execute_and_format() + + def execute_and_format(self) -> str: + if self.config.dry_run: + return self.formatter.format_dry_run(self.config) + + result = self.execute() + return self.formatter.format_output(result, self.config.output) + + +class Up(BaseAction[UpConfig, UpResult]): + def __init__(self, logger: LoggerProtocol = None): + super().__init__(logger) + self.formatter = UpFormatter() + + def up(self, config: UpConfig) -> UpResult: + return self.execute(config) + + def execute(self, config: UpConfig) -> UpResult: + service = UpService(config, logger=self.logger) + return service.execute() + + def format_output(self, result: UpResult, output: str) -> str: + return self.formatter.format_output(result, output) + + def format_dry_run(self, config: UpConfig) -> str: + return self.formatter.format_dry_run(config) diff --git a/cli/app/commands/test/__init__.py b/cli/app/commands/test/__init__.py new file mode 100644 
index 00000000..13f8dbb2 --- /dev/null +++ b/cli/app/commands/test/__init__.py @@ -0,0 +1 @@ +# cli commands test module diff --git a/cli/app/commands/test/command.py b/cli/app/commands/test/command.py new file mode 100644 index 00000000..83d2132c --- /dev/null +++ b/cli/app/commands/test/command.py @@ -0,0 +1,14 @@ +import typer + +from .messages import test_app_help +from .test import TestCommand + +test_app = typer.Typer(help=test_app_help, invoke_without_command=True) + + +@test_app.callback() +def test_callback(ctx: typer.Context, target: str = typer.Argument(None, help="Test target (e.g., version)")): + """Run tests (only in DEVELOPMENT environment)""" + if ctx.invoked_subcommand is None: + test_command = TestCommand() + test_command.run(target) diff --git a/cli/app/commands/test/messages.py b/cli/app/commands/test/messages.py new file mode 100644 index 00000000..5dcc633c --- /dev/null +++ b/cli/app/commands/test/messages.py @@ -0,0 +1,3 @@ +test_app_help = "Run tests (only in DEVELOPMENT environment)" +development_only_error = "Test command is only available in DEVELOPMENT environment." +running_command = "Running: {command}" diff --git a/cli/app/commands/test/test.py b/cli/app/commands/test/test.py new file mode 100644 index 00000000..4ce1cdc1 --- /dev/null +++ b/cli/app/commands/test/test.py @@ -0,0 +1,25 @@ +import subprocess + +import typer + +from app.utils.config import Config +from app.utils.logger import Logger + +from .messages import development_only_error, running_command + + +class TestCommand: + def __init__(self): + self.config = Config() + self.logger = Logger() + + def run(self, target: str = typer.Argument(None, help="Test target (e.g., version)")): + if not self.config.is_development(): + self.logger.error(development_only_error) + raise typer.Exit(1) + cmd = ["make", "test"] + if target: + cmd.append(f"test-{target}") + self.logger.info(running_command.format(command=" ".join(cmd))) + result = subprocess.run(cmd) + raise typer.Exit(result.returncode) diff --git a/cli/app/commands/uninstall/__init__.py b/cli/app/commands/uninstall/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/app/commands/uninstall/command.py b/cli/app/commands/uninstall/command.py new file mode 100644 index 00000000..c6b51c7f --- /dev/null +++ b/cli/app/commands/uninstall/command.py @@ -0,0 +1,28 @@ +import typer + +from app.utils.logger import Logger +from app.utils.timeout import TimeoutWrapper +from .run import Uninstall + +uninstall_app = typer.Typer(help="Uninstall Nixopus", invoke_without_command=True) + + +@uninstall_app.callback() +def uninstall_callback( + ctx: typer.Context, + verbose: bool = typer.Option(False, "--verbose", "-v", help="Show more details while uninstalling"), + timeout: int = typer.Option(300, "--timeout", "-t", help="How long to wait for each step (in seconds)"), + dry_run: bool = typer.Option(False, "--dry-run", "-d", help="See what would happen, but don't make changes"), + force: bool = typer.Option(False, "--force", "-f", help="Remove files without confirmation prompts"), +): + """Uninstall Nixopus completely from the system""" + if ctx.invoked_subcommand is None: + logger = Logger(verbose=verbose) + uninstall = Uninstall( + logger=logger, + verbose=verbose, + timeout=timeout, + dry_run=dry_run, + force=force + ) + uninstall.run() \ No newline at end of file diff --git a/cli/app/commands/uninstall/messages.py b/cli/app/commands/uninstall/messages.py new file mode 100644 index 00000000..f9aa35cd --- /dev/null +++ 
b/cli/app/commands/uninstall/messages.py @@ -0,0 +1,27 @@ +uninstalling_nixopus = "Uninstalling Nixopus" +uninstall_failed = "Uninstall failed" +uninstall_completed = "Uninstall completed successfully" +stopping_services = "Stopping Nixopus services" +removing_ssh_keys = "Removing SSH keys from authorized_keys" +removing_config_directory = "Removing configuration directory" +services_stop_failed = "Failed to stop services" +ssh_keys_removal_failed = "Failed to remove SSH keys" +config_directory_removal_failed = "Failed to remove configuration directory" +operation_timed_out = "Operation timed out" +uninstall_dry_run_mode = "Dry run mode: would perform uninstall operations" +uninstall_completed_info = "Nixopus has been completely removed from your system." +uninstall_thank_you = "Thank you for using Nixopus!" +docker_not_running_skip_removal = "Docker is not running, skipping image removal" +authorized_keys_not_found = "authorized_keys file not found, skipping SSH key removal" +ssh_key_not_found_in_authorized_keys = "SSH key not found in authorized_keys" +compose_file_not_found_skip = "Compose file not found at {compose_file_path}, skipping service stop" +failed_at_step = "Failed at {step_name}" +removed_docker_image = "Removed Docker image: {image}" +failed_to_remove_image = "Failed to remove image {image}: {error}" +ssh_public_key_not_found_skip = "SSH public key not found at {public_key_path}, skipping authorized_keys cleanup" +removed_ssh_key_from = "Removed SSH key from {authorized_keys_path}" +removed_private_key = "Removed private key: {ssh_key_path}" +removed_public_key = "Removed public key: {public_key_path}" +config_dir_not_exist_skip = "Configuration directory {config_dir_path} does not exist, skipping removal" +removed_config_dir = "Removed configuration directory: {config_dir_path}" +skipped_removal_config_dir = "Skipped removal of configuration directory: {config_dir_path}" diff --git a/cli/app/commands/uninstall/run.py b/cli/app/commands/uninstall/run.py new file mode 100644 index 00000000..2aa70704 --- /dev/null +++ b/cli/app/commands/uninstall/run.py @@ -0,0 +1,185 @@ +import typer +import os +import shutil +import subprocess +from pathlib import Path +from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn +from app.utils.protocols import LoggerProtocol +from app.utils.config import Config, NIXOPUS_CONFIG_DIR, SSH_FILE_PATH, DEFAULT_COMPOSE_FILE +from app.utils.timeout import TimeoutWrapper +from app.commands.service.down import Down, DownConfig +from .messages import ( + uninstalling_nixopus, uninstall_failed, uninstall_completed, + services_stop_failed, + ssh_keys_removal_failed, config_directory_removal_failed, + operation_timed_out, uninstall_dry_run_mode, + uninstall_completed_info, uninstall_thank_you, + authorized_keys_not_found, ssh_key_not_found_in_authorized_keys, + compose_file_not_found_skip, failed_at_step, + ssh_public_key_not_found_skip, removed_ssh_key_from, removed_private_key, removed_public_key, + config_dir_not_exist_skip, removed_config_dir, skipped_removal_config_dir +) + +_config = Config() +_config_dir = _config.get_yaml_value(NIXOPUS_CONFIG_DIR) +_compose_file = _config.get_yaml_value(DEFAULT_COMPOSE_FILE) +_ssh_key_path = _config_dir + "/" + _config.get_yaml_value(SSH_FILE_PATH) + +class Uninstall: + def __init__(self, logger: LoggerProtocol = None, verbose: bool = False, timeout: int = 300, dry_run: bool = False, force: bool = False): + self.logger = logger + self.verbose = verbose + self.timeout = timeout + 
self.dry_run = dry_run + self.force = force + self.progress = None + self.main_task = None + + def run(self): + steps = [ + ("Stopping services", self._stop_services), + ("Removing SSH keys", self._remove_ssh_keys), + ("Removing configuration directory", self._remove_config_directory), + ] + + try: + if self.dry_run: + self.logger.info(uninstall_dry_run_mode) + for step_name, _ in steps: + self.logger.info(f"Would execute: {step_name}") + return + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TaskProgressColumn(), + transient=True, + refresh_per_second=2, + ) as progress: + self.progress = progress + self.main_task = progress.add_task(uninstalling_nixopus, total=len(steps)) + + for i, (step_name, step_func) in enumerate(steps): + progress.update(self.main_task, description=f"{uninstalling_nixopus} - {step_name} ({i+1}/{len(steps)})") + try: + step_func() + progress.advance(self.main_task, 1) + except Exception as e: + progress.update(self.main_task, description=failed_at_step.format(step_name=step_name)) + raise + + progress.update(self.main_task, completed=True, description=uninstall_completed) + + self._show_success_message() + + except Exception as e: + self._handle_uninstall_error(e) + self.logger.error(f"{uninstall_failed}: {str(e)}") + raise typer.Exit(1) + + def _handle_uninstall_error(self, error, context=""): + context_msg = f" during {context}" if context else "" + if self.verbose: + self.logger.error(f"{uninstall_failed}{context_msg}: {str(error)}") + else: + self.logger.error(f"{uninstall_failed}{context_msg}") + + def _stop_services(self): + compose_file_path = os.path.join(_config_dir, _compose_file) + + if not os.path.exists(compose_file_path): + self.logger.debug(compose_file_not_found_skip.format(compose_file_path=compose_file_path)) + return + + try: + config = DownConfig( + name="all", + env_file=None, + verbose=self.verbose, + output="text", + dry_run=False, + compose_file=compose_file_path + ) + + down_service = Down(logger=self.logger) + + with TimeoutWrapper(self.timeout): + result = down_service.down(config) + + if not result.success: + raise Exception(f"{services_stop_failed}: {result.error}") + + except TimeoutError: + raise Exception(f"{services_stop_failed}: {operation_timed_out}") + + def _remove_ssh_keys(self): + ssh_key_path = Path(_ssh_key_path) + public_key_path = ssh_key_path.with_suffix('.pub') + + if not public_key_path.exists(): + self.logger.debug(ssh_public_key_not_found_skip.format(public_key_path=public_key_path)) + return + + try: + with open(public_key_path, 'r') as f: + public_key_content = f.read().strip() + + authorized_keys_path = Path.home() / '.ssh' / 'authorized_keys' + + if not authorized_keys_path.exists(): + self.logger.debug(authorized_keys_not_found) + return + + with open(authorized_keys_path, 'r') as f: + lines = f.readlines() + + original_count = len(lines) + filtered_lines = [line for line in lines if public_key_content not in line] + + if len(filtered_lines) < original_count: + with open(authorized_keys_path, 'w') as f: + f.writelines(filtered_lines) + self.logger.debug(removed_ssh_key_from.format(authorized_keys_path=authorized_keys_path)) + else: + self.logger.debug(ssh_key_not_found_in_authorized_keys) + + if ssh_key_path.exists(): + ssh_key_path.unlink() + self.logger.debug(removed_private_key.format(ssh_key_path=ssh_key_path)) + + if public_key_path.exists(): + public_key_path.unlink() + 
self.logger.debug(removed_public_key.format(public_key_path=public_key_path)) + + except Exception as e: + raise Exception(f"{ssh_keys_removal_failed}: {str(e)}") + + def _remove_config_directory(self): + config_dir_path = Path(_config_dir) + + if not config_dir_path.exists(): + self.logger.debug(config_dir_not_exist_skip.format(config_dir_path=config_dir_path)) + return + + try: + if self.force or self._confirm_removal(config_dir_path): + shutil.rmtree(config_dir_path) + self.logger.debug(removed_config_dir.format(config_dir_path=config_dir_path)) + else: + self.logger.info(skipped_removal_config_dir.format(config_dir_path=config_dir_path)) + + except Exception as e: + raise Exception(f"{config_directory_removal_failed}: {str(e)}") + + def _confirm_removal(self, path: Path) -> bool: + if self.force: + return True + + response = typer.confirm(f"Remove configuration directory {path}? This action cannot be undone.") + return response + + def _show_success_message(self): + self.logger.success(uninstall_completed) + self.logger.info(uninstall_completed_info) + self.logger.info(uninstall_thank_you) diff --git a/cli/app/commands/version/__init__.py b/cli/app/commands/version/__init__.py new file mode 100644 index 00000000..37cb5757 --- /dev/null +++ b/cli/app/commands/version/__init__.py @@ -0,0 +1 @@ +# cli commands version module diff --git a/cli/app/commands/version/command.py b/cli/app/commands/version/command.py new file mode 100644 index 00000000..7d3798c7 --- /dev/null +++ b/cli/app/commands/version/command.py @@ -0,0 +1,22 @@ +import typer + +from app.utils.message import application_version_help + +from .version import VersionCommand + +version_app = typer.Typer(help=application_version_help, invoke_without_command=True) + + +@version_app.callback() +def version_callback(ctx: typer.Context): + """Show version information (default)""" + if ctx.invoked_subcommand is None: + version_command = VersionCommand() + version_command.run() + + +def main_version_callback(value: bool): + if value: + version_command = VersionCommand() + version_command.run() + raise typer.Exit() diff --git a/cli/app/commands/version/version.py b/cli/app/commands/version/version.py new file mode 100644 index 00000000..b4c0d9c2 --- /dev/null +++ b/cli/app/commands/version/version.py @@ -0,0 +1,22 @@ +from importlib.metadata import version + +from rich.console import Console +from rich.panel import Panel +from rich.text import Text + + +class VersionCommand: + def __init__(self): + self.console = Console() + + def run(self): + """Display the version of the CLI""" + cli_version = version("nixopus") + + version_text = Text() + version_text.append("Nixopus CLI", style="bold blue") + version_text.append(f" v{cli_version}", style="green") + + panel = Panel(version_text, title="[bold white]Version Info[/bold white]", border_style="blue", padding=(0, 1)) + + self.console.print(panel) diff --git a/cli/app/main.py b/cli/app/main.py new file mode 100644 index 00000000..6f44706d --- /dev/null +++ b/cli/app/main.py @@ -0,0 +1,122 @@ +import os +import time +import typer + +from importlib.metadata import version as get_version +from rich.console import Console +from rich.panel import Panel +from rich.text import Text + +from app.commands.clone.command import clone_app +from app.commands.conf.command import conf_app +from app.commands.install.command import install_app +from app.commands.preflight.command import preflight_app +from app.commands.proxy.command import proxy_app +from app.commands.service.command import service_app 
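+# test_app is only mounted in development builds; see the config.is_development()
+# check at the bottom of this module.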
+from app.commands.test.command import test_app +from app.commands.uninstall.command import uninstall_app +from app.commands.version.command import main_version_callback, version_app +from app.commands.conflict.command import conflict_app +from app.commands.version.version import VersionCommand +from app.utils.message import application_add_completion, application_description, application_name, application_version_help +from app.utils.config import Config + + +app = typer.Typer( + name=application_name, + help=application_description, + add_completion=application_add_completion, +) + + +@app.callback(invoke_without_command=True) +def main( + ctx: typer.Context, + version: bool = typer.Option( + None, + "--version", + "-v", + callback=main_version_callback, + help=application_version_help, + ), +): + if ctx.invoked_subcommand is None: + console = Console() + + ascii_art = r""" +····································· +: _ _ _ : +:| \ | (_) : +:| \| |___ _____ _ __ _ _ ___ : +:| . ` | \ \/ / _ \| '_ \| | | / __|: +:| |\ | |> < (_) | |_) | |_| \__ \: +:|_| \_|_/_/\_\___/| .__/ \__,_|___/: +: | | : +: |_| : +····································· + + + @%%@ + @%--+% + @@%#=---=%%@ + %%=-----------=%@ + %=----------------=*% + @#--------------------=% + #----+#%#=-----=###=---=% + @=--=-.....+=-==.....==--# + %=-=....=-..=+=..=-...==-*@ + @=-*...+%#:..=..-%*=...=-%@ + %-+....*+.+=-+=.**....==% + @%==#% @#=+....*-------+....#=% @%*=+% + @%--#@ %==*....%-+*+=#....*=+@ @#==#@ + @%--+@ %=--==....+*=....+=--#@ %===#@ + @=---+##=-------------=---====*##====#@ + %--------------------===============% + @=-----=+----------=======#======*@ + @@@@*----+------========%@@@@ + %%#%=---=*#=--=#==-=#+=====%%%% + @=----=*%@+---+@====#@%+=====#@ + @@@ @#=--=@ %====% @@@ + @*==*%@ @%*==%% + """ + + text = Text(ascii_art, style="bold cyan") + panel = Panel(text, title="[bold white]Welcome to[/bold white]", border_style="cyan", padding=(1, 2)) + + console.print(panel) + + cli_version = get_version("nixopus") + version_text = Text() + version_text.append("Version: ", style="bold white") + version_text.append(f"v{cli_version}", style="green") + + description_text = Text() + description_text.append(application_description, style="dim") + + console.print(version_text) + console.print(description_text) + console.print() + + help_text = Text() + help_text.append("Run ", style="dim") + help_text.append("nixopus --help", style="bold green") + help_text.append(" to explore all available commands", style="dim") + console.print(help_text) + + +app.add_typer(preflight_app, name="preflight") +app.add_typer(clone_app, name="clone") +app.add_typer(conflict_app, name="conflict") +app.add_typer(conf_app, name="conf") +app.add_typer(service_app, name="service") +app.add_typer(proxy_app, name="proxy") +app.add_typer(install_app, name="install") +app.add_typer(uninstall_app, name="uninstall") +app.add_typer(version_app, name="version") + +config = Config() +if config.is_development(): + app.add_typer(test_app, name="test") + +if __name__ == "__main__": + app() diff --git a/cli/app/utils/__init__.py b/cli/app/utils/__init__.py new file mode 100644 index 00000000..78f19b1d --- /dev/null +++ b/cli/app/utils/__init__.py @@ -0,0 +1 @@ +# cli utils module diff --git a/cli/app/utils/config.py b/cli/app/utils/config.py new file mode 100644 index 00000000..a43c6af3 --- /dev/null +++ b/cli/app/utils/config.py @@ -0,0 +1,150 @@ +import os +import sys +import yaml +import re +from app.utils.message import MISSING_CONFIG_KEY_MESSAGE + + +class 
Config: + def __init__(self, default_env="PRODUCTION"): + self.default_env = default_env + self._yaml_config = None + self._cache = {} + + # Check if running as PyInstaller bundle + if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"): + # Running as PyInstaller bundle + self._yaml_path = os.path.join(sys._MEIPASS, "helpers", "config.prod.yaml") + else: + # Running as normal Python script + self._yaml_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../helpers/config.prod.yaml")) + + def get_env(self): + return os.environ.get("ENV", self.default_env) + + def is_development(self): + return self.get_env().upper() == "DEVELOPMENT" + + def load_yaml_config(self): + if self._yaml_config is None: + with open(self._yaml_path, "r") as f: + self._yaml_config = yaml.safe_load(f) + return self._yaml_config + + def get_yaml_value(self, path: str): + config = self.load_yaml_config() + keys = path.split(".") + for key in keys: + if isinstance(config, dict) and key in config: + config = config[key] + else: + raise KeyError(MISSING_CONFIG_KEY_MESSAGE.format(path=path, key=key)) + if isinstance(config, str): + config = expand_env_placeholders(config) + return config + + def get_service_env_values(self, service_env_path: str): + config = self.get_yaml_value(service_env_path) + return {key: expand_env_placeholders(value) for key, value in config.items()} + + def load_user_config(self, config_file: str): + """Load and parse user config file, returning flattened config dict.""" + if not config_file: + return {} + + if not os.path.exists(config_file): + raise FileNotFoundError(f"Config file not found: {config_file}") + + with open(config_file, "r") as f: + user_config = yaml.safe_load(f) + + flattened = {} + self.flatten_config(user_config, flattened) + return flattened + + def flatten_config(self, config: dict, result: dict, prefix: str = ""): + """Flatten nested config dict into dot notation keys.""" + for key, value in config.items(): + new_key = f"{prefix}.{key}" if prefix else key + if isinstance(value, dict): + self.flatten_config(value, result, new_key) + else: + result[new_key] = value + + def unflatten_config(self, flattened_config: dict) -> dict: + """Convert flattened config back to nested structure.""" + nested = {} + for key, value in flattened_config.items(): + keys = key.split(".") + current = nested + for k in keys[:-1]: + if k not in current: + current[k] = {} + current = current[k] + current[keys[-1]] = value + return nested + + def get_config_value(self, key: str, user_config: dict, defaults: dict): + """Get config value from user config with fallback to defaults and caching.""" + if key in self._cache: + return self._cache[key] + + # Key mappings for user config lookup + key_mappings = { + "proxy_port": "services.caddy.env.PROXY_PORT", + "repo_url": "clone.repo", + "branch_name": "clone.branch", + "source_path": "clone.source-path", + "config_dir": "nixopus-config-dir", + "api_env_file_path": "services.api.env.API_ENV_FILE", + "view_env_file_path": "services.view.env.VIEW_ENV_FILE", + "compose_file": "compose-file-path", + "required_ports": "ports", + } + + config_path = key_mappings.get(key, key) + user_value = user_config.get(config_path) + value = user_value if user_value is not None else defaults.get(key) + + if value is None and key not in ["ssh_passphrase"]: + raise ValueError(f"Configuration key '{key}' has no default value") + + self._cache[key] = value + return value + + +def expand_env_placeholders(value: str) -> str: + # Expand environment 
placeholders in the form ${ENV_VAR:-default} + pattern = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(:-([^}]*))?}") + + def replacer(match): + var_name = match.group(1) + default = match.group(3) if match.group(2) else "" + return os.environ.get(var_name, default) + + return pattern.sub(replacer, value) + + +VIEW_ENV_FILE = "services.view.env.VIEW_ENV_FILE" +API_ENV_FILE = "services.api.env.API_ENV_FILE" +DEFAULT_REPO = "clone.repo" +DEFAULT_BRANCH = "clone.branch" +DEFAULT_PATH = "clone.source-path" +DEFAULT_COMPOSE_FILE = "compose-file-path" +NIXOPUS_CONFIG_DIR = "nixopus-config-dir" +PROXY_PORT = "services.caddy.env.PROXY_PORT" +CADDY_BASE_URL = "services.caddy.env.BASE_URL" +CONFIG_ENDPOINT = "services.caddy.env.CONFIG_ENDPOINT" +LOAD_ENDPOINT = "services.caddy.env.LOAD_ENDPOINT" +STOP_ENDPOINT = "services.caddy.env.STOP_ENDPOINT" +DEPS = "deps" +PORTS = "ports" +API_SERVICE = "services.api" +VIEW_SERVICE = "services.view" +SSH_KEY_SIZE = "ssh_key_size" +SSH_KEY_TYPE = "ssh_key_type" +SSH_FILE_PATH = "ssh_file_path" +VIEW_PORT = "services.view.env.NEXT_PUBLIC_PORT" +API_PORT = "services.api.env.PORT" +CADDY_CONFIG_VOLUME = "services.caddy.env.CADDY_CONFIG_VOLUME" +DOCKER_PORT = "services.api.env.DOCKER_PORT" diff --git a/cli/app/utils/lib.py b/cli/app/utils/lib.py new file mode 100644 index 00000000..b3ad2af3 --- /dev/null +++ b/cli/app/utils/lib.py @@ -0,0 +1,226 @@ +import os +import platform +import shutil +import stat +from concurrent.futures import ThreadPoolExecutor, as_completed +from enum import Enum +from typing import Callable, List, Optional, Tuple, TypeVar +import requests + +from app.utils.message import FAILED_TO_GET_PUBLIC_IP_MESSAGE, FAILED_TO_REMOVE_DIRECTORY_MESSAGE, REMOVED_DIRECTORY_MESSAGE + +T = TypeVar("T") +R = TypeVar("R") + + +class SupportedOS(str, Enum): + LINUX = "linux" + MACOS = "darwin" + + +class SupportedDistribution(str, Enum): + DEBIAN = "debian" + UBUNTU = "ubuntu" + CENTOS = "centos" + FEDORA = "fedora" + ALPINE = "alpine" + + +class SupportedPackageManager(str, Enum): + APT = "apt" + YUM = "yum" + DNF = "dnf" + PACMAN = "pacman" + APK = "apk" + BREW = "brew" + + +class Supported: + @staticmethod + def os(os_name: str) -> bool: + return os_name in [os.value for os in SupportedOS] + + @staticmethod + def distribution(distribution: str) -> bool: + return distribution in [dist.value for dist in SupportedDistribution] + + @staticmethod + def package_manager(package_manager: str) -> bool: + return package_manager in [pm.value for pm in SupportedPackageManager] + + @staticmethod + def get_os(): + return [os.value for os in SupportedOS] + + @staticmethod + def get_distributions(): + return [dist.value for dist in SupportedDistribution] + + +class HostInformation: + @staticmethod + def get_os_name(): + return platform.system().lower() + + @staticmethod + def get_package_manager(): + os_name = HostInformation.get_os_name() + + if os_name == SupportedOS.MACOS.value: + return SupportedPackageManager.BREW.value + + package_managers = [pm.value for pm in SupportedPackageManager if pm != SupportedPackageManager.BREW] + + for pm in package_managers: + if HostInformation.command_exists(pm): + return pm + raise RuntimeError("No supported package manager found on this system. 
Please install one or specify it manually.") + + @staticmethod + def command_exists(command): + return shutil.which(command) is not None + + @staticmethod + def get_public_ip(): + try: + response = requests.get('https://api.ipify.org', timeout=10) + response.raise_for_status() # fail on non-2xx + return response.text.strip() + except requests.RequestException: + raise Exception(FAILED_TO_GET_PUBLIC_IP_MESSAGE) + + +class ParallelProcessor: + @staticmethod + def process_items( + items: List[T], + processor_func: Callable[[T], R], + max_workers: int = 50, + error_handler: Callable[[T, Exception], R] = None, + ) -> List[R]: + if not items: + return [] + + results = [] + max_workers = min(len(items), max_workers) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = {executor.submit(processor_func, item): item for item in items} + + for future in as_completed(futures): + try: + result = future.result() + results.append(result) + except Exception as e: + item = futures[future] + if error_handler: + error_result = error_handler(item, e) + results.append(error_result) + return results + + +class DirectoryManager: + @staticmethod + def path_exists(path: str) -> bool: + return os.path.exists(path) + + @staticmethod + def path_exists_and_not_force(path: str, force: bool) -> bool: + return os.path.exists(path) and not force + + @staticmethod + def remove_directory(path: str, logger=None) -> bool: + if logger: + logger.debug(f"Attempting to remove directory: {path}") + logger.debug(f"Directory exists: {os.path.exists(path)}") + logger.debug(f"Directory is directory: {os.path.isdir(path) if os.path.exists(path) else 'N/A'}") + + try: + shutil.rmtree(path) + if logger: + logger.debug(REMOVED_DIRECTORY_MESSAGE.format(path=path)) + logger.debug(f"Directory {path} removed successfully") + return True + except Exception as e: + if logger: + logger.debug(f"Exception during directory removal: {type(e).__name__}: {str(e)}") + logger.error(FAILED_TO_REMOVE_DIRECTORY_MESSAGE.format(path=path, error=e)) + return False + + +class FileManager: + @staticmethod + def set_permissions(file_path: str, mode: int, logger=None) -> Tuple[bool, Optional[str]]: + try: + if logger: + logger.debug(f"Setting permissions {oct(mode)} on {file_path}") + + os.chmod(file_path, mode) + + if logger: + logger.debug("File permissions set successfully") + return True, None + except Exception as e: + error_msg = f"Failed to set permissions on {file_path}: {e}" + if logger: + logger.error(error_msg) + return False, error_msg + + @staticmethod + def create_directory( + path: str, mode: int = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, logger=None + ) -> Tuple[bool, Optional[str]]: + try: + if not os.path.exists(path): + os.makedirs(path, mode=mode) + if logger: + logger.debug(f"Created directory: {path}") + return True, None + except Exception as e: + error_msg = f"Failed to create directory {path}: {e}" + if logger: + logger.error(error_msg) + return False, error_msg + + @staticmethod + def append_to_file( + file_path: str, content: str, mode: int = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, logger=None + ) -> Tuple[bool, Optional[str]]: + try: + with open(file_path, "a") as f: + f.write(f"\n{content}\n") + + FileManager.set_permissions(file_path, mode, logger) + + if logger: + logger.debug(f"Content appended to {file_path}") + return True, None + except Exception as e: + error_msg = f"Failed to append to {file_path}: {e}" + if logger: + logger.error(error_msg) + return False, error_msg + + 
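+    # read_file_content follows the same convention as the write helpers above:
+    # it returns a (success, content, error) tuple instead of raising, so callers
+    # decide how to log and recover.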
@staticmethod + def read_file_content(file_path: str, logger=None) -> Tuple[bool, Optional[str], Optional[str]]: + try: + with open(file_path, "r") as f: + content = f.read().strip() + return True, content, None + except Exception as e: + error_msg = f"Failed to read {file_path}: {e}" + if logger: + logger.error(error_msg) + return False, None, error_msg + + @staticmethod + def expand_user_path(path: str) -> str: + return os.path.expanduser(path) + + @staticmethod + def get_directory_path(file_path: str) -> str: + return os.path.dirname(file_path) + + @staticmethod + def get_public_key_path(private_key_path: str) -> str: + return f"{private_key_path}.pub" diff --git a/cli/app/utils/logger.py b/cli/app/utils/logger.py new file mode 100644 index 00000000..8416b9bc --- /dev/null +++ b/cli/app/utils/logger.py @@ -0,0 +1,51 @@ +import typer + +from .message import DEBUG_MESSAGE, ERROR_MESSAGE, HIGHLIGHT_MESSAGE, INFO_MESSAGE, SUCCESS_MESSAGE, WARNING_MESSAGE + + +class Logger: + """Wrapper for typer.secho to log messages to the console""" + + def __init__(self, verbose: bool = False, quiet: bool = False): + if verbose and quiet: + raise ValueError("Cannot have both verbose and quiet options enabled") + self.verbose = verbose + self.quiet = quiet + + def _should_print(self, require_verbose: bool = False) -> bool: + """Helper method to determine if message should be printed""" + if self.quiet: + return False + if require_verbose and not self.verbose: + return False + return True + + def info(self, message: str) -> None: + """Prints an info message""" + if self._should_print(): + typer.secho(INFO_MESSAGE.format(message=message), fg=typer.colors.BLUE) + + def debug(self, message: str) -> None: + """Prints a debug message if verbose is enabled""" + if self._should_print(require_verbose=True): + typer.secho(DEBUG_MESSAGE.format(message=message), fg=typer.colors.CYAN) + + def warning(self, message: str) -> None: + """Prints a warning message""" + if self._should_print(): + typer.secho(WARNING_MESSAGE.format(message=message), fg=typer.colors.YELLOW) + + def error(self, message: str) -> None: + """Prints an error message""" + if self._should_print(): + typer.secho(ERROR_MESSAGE.format(message=message), fg=typer.colors.RED) + + def success(self, message: str) -> None: + """Prints a success message""" + if self._should_print(): + typer.secho(SUCCESS_MESSAGE.format(message=message), fg=typer.colors.GREEN) + + def highlight(self, message: str) -> None: + """Prints a highlighted message""" + if self._should_print(): + typer.secho(HIGHLIGHT_MESSAGE.format(message=message), fg=typer.colors.MAGENTA) diff --git a/cli/app/utils/message.py b/cli/app/utils/message.py new file mode 100644 index 00000000..843f0638 --- /dev/null +++ b/cli/app/utils/message.py @@ -0,0 +1,18 @@ +# Global messages for the application + +# Application +application_name = "nixopus" +application_description = "Nixopus CLI - A powerful deployment and management tool" +application_no_args_is_help = True +application_add_completion = False +application_version_help = "Show version information" +INFO_MESSAGE = "INFO: {message}" +DEBUG_MESSAGE = "DEBUG: {message}" +WARNING_MESSAGE = "WARNING: {message}" +ERROR_MESSAGE = "ERROR: {message}" +SUCCESS_MESSAGE = "{message}" +HIGHLIGHT_MESSAGE = "{message}" +REMOVED_DIRECTORY_MESSAGE = "Removed existing directory: {path}" +FAILED_TO_REMOVE_DIRECTORY_MESSAGE = "Failed to remove directory: {path}" +MISSING_CONFIG_KEY_MESSAGE = "Missing config key: {path} (failed at '{key}')" 
+FAILED_TO_GET_PUBLIC_IP_MESSAGE = "Failed to get public IP" \ No newline at end of file diff --git a/cli/app/utils/output_formatter.py b/cli/app/utils/output_formatter.py new file mode 100644 index 00000000..a6d932a2 --- /dev/null +++ b/cli/app/utils/output_formatter.py @@ -0,0 +1,129 @@ +import json +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import BaseModel +from rich.console import Console +from rich.table import Table + + +class OutputMessage(BaseModel): + success: bool + message: str + data: Optional[Dict[str, Any]] = None + error: Optional[str] = None + + +class OutputFormatter: + def __init__(self, invalid_output_format_msg: str = "Invalid output format"): + self.invalid_output_format_msg = invalid_output_format_msg + self.console = Console() + + def format_text(self, result: Any) -> str: + if isinstance(result, OutputMessage): + if result.success: + return result.message + else: + return f"Error: {result.error or 'Unknown error'}" + elif isinstance(result, list): + return "\n".join([self.format_text(item) for item in result]) + else: + return str(result) + + def format_json(self, result: Any) -> str: + if isinstance(result, OutputMessage): + return json.dumps(result.model_dump(), indent=2) + elif isinstance(result, list): + return json.dumps([item.model_dump() if hasattr(item, "model_dump") else item for item in result], indent=2) + elif isinstance(result, BaseModel): + return json.dumps(result.model_dump(), indent=2) + else: + return json.dumps(result, indent=2) + + def format_output(self, result: Any, output: str) -> str: + if output == "text": + return self.format_text(result) + elif output == "json": + return self.format_json(result) + else: + raise ValueError(self.invalid_output_format_msg) + + def create_success_message(self, message: str, data: Optional[Dict[str, Any]] = None) -> OutputMessage: + return OutputMessage(success=True, message=message, data=data) + + def create_error_message(self, error: str, data: Optional[Dict[str, Any]] = None) -> OutputMessage: + return OutputMessage(success=False, message="", error=error, data=data) + + def create_table( + self, + data: Union[Dict[str, Any], List[Dict[str, Any]]], + title: Optional[str] = None, + headers: Optional[Union[Tuple[str, str], List[str]]] = None, + show_header: bool = True, + show_lines: bool = False, + column_styles: Optional[List[str]] = None, + ) -> str: + if not data: + return "No data to display" + + table = Table(show_header=show_header, show_lines=show_lines) + + if title: + table.title = title + + if isinstance(data, dict): + if headers is None: + headers = ("Key", "Value") + + if isinstance(headers, list): + headers = tuple(headers[:2]) + + if column_styles is None: + column_styles = ["cyan", "magenta"] + + table.add_column(headers[0], style=column_styles[0], no_wrap=True) + table.add_column(headers[1], style=column_styles[1]) + + for key, value in sorted(data.items()): + table.add_row(str(key), str(value)) + + elif isinstance(data, list) and data: + if headers is None: + headers = list(data[0].keys()) + elif isinstance(headers, tuple): + headers = list(headers) + + if column_styles is None: + column_styles = ["cyan", "magenta", "green", "yellow", "blue", "red"] * (len(headers) // 6 + 1) + + for i, header in enumerate(headers): + style = column_styles[i] if i < len(column_styles) else "white" + table.add_column(str(header), style=style) + + for row in data: + row_data = [str(row.get(header, "")) for header in headers] + table.add_row(*row_data) + + with 
self.console.capture() as capture: + self.console.print(table) + + return capture.get() + + def format_table_output( + self, + data: Union[Dict[str, str], List[Dict[str, Any]]], + output_format: str, + success_message: str, + title: Optional[str] = None, + headers: Optional[Union[Tuple[str, str], List[str]]] = None, + ) -> str: + if output_format == "json": + return self.format_json({ + "success": True, + "message": success_message, + "data": data + }) + else: + if not data: + return "No data to display" + + return self.create_table(data, title, headers).strip() diff --git a/cli/app/utils/protocols.py b/cli/app/utils/protocols.py new file mode 100644 index 00000000..6f4693f8 --- /dev/null +++ b/cli/app/utils/protocols.py @@ -0,0 +1,14 @@ +from typing import Protocol + + +class LoggerProtocol(Protocol): + def debug(self, message: str) -> None: ... + def info(self, message: str) -> None: ... + def warning(self, message: str) -> None: ... + def error(self, message: str) -> None: ... + def success(self, message: str) -> None: ... + def highlight(self, message: str) -> None: ... + + +class DockerServiceProtocol(Protocol): + def execute_services(self, name: str, env_file: str = None, compose_file: str = None) -> tuple[bool, str]: ... diff --git a/cli/app/utils/timeout.py b/cli/app/utils/timeout.py new file mode 100644 index 00000000..e03f0c09 --- /dev/null +++ b/cli/app/utils/timeout.py @@ -0,0 +1,24 @@ +import signal +from app.commands.install.messages import timeout_error + + +class TimeoutWrapper: + """Context manager for timeout operations""" + + def __init__(self, timeout: int): + self.timeout = timeout + self.original_handler = None + + def __enter__(self): + if self.timeout > 0: + def timeout_handler(signum, frame): + raise TimeoutError(timeout_error.format(timeout=self.timeout)) + + self.original_handler = signal.signal(signal.SIGALRM, timeout_handler) + signal.alarm(self.timeout) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.timeout > 0: + signal.alarm(0) + signal.signal(signal.SIGALRM, self.original_handler) diff --git a/cli/build.sh b/cli/build.sh new file mode 100755 index 00000000..ff5e0341 --- /dev/null +++ b/cli/build.sh @@ -0,0 +1,315 @@ +#!/bin/bash + +set -e + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +APP_NAME="nixopus" +BUILD_DIR="dist" +BINARY_DIR="binaries" +SPEC_FILE="nixopus.spec" + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +check_requirements() { + log_info "Checking requirements..." + + if ! command -v poetry &> /dev/null; then + log_error "Poetry is not installed. Please install Poetry first." + exit 1 + fi + + if ! command -v python3 &> /dev/null; then + log_error "Python3 is not installed." + exit 1 + fi + + log_success "All requirements met" +} + +setup_environment() { + log_info "Setting up build environment..." + + if ! poetry check; then + log_info "Updating poetry lock file..." + poetry lock + fi + + poetry install + + if ! poetry run python -c "import PyInstaller" &> /dev/null; then + log_info "Installing PyInstaller..." + poetry add --group dev pyinstaller + fi + + log_success "Environment setup complete" +} + +create_spec_file() { + log_info "Creating PyInstaller spec file..." 
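+    # The generated spec bundles helpers/config.prod.yaml as data and lists each
+    # Typer sub-command module as a hidden import so PyInstaller does not miss them.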
+ + cat > $SPEC_FILE << 'EOF' +# -*- mode: python ; coding: utf-8 -*- + +block_cipher = None + +a = Analysis( + ['app/main.py'], + pathex=[], + binaries=[], + datas=[ + ('../helpers/config.prod.yaml', 'helpers/'), + ], + hiddenimports=[ + 'app.commands.clone.command', + 'app.commands.conf.command', + 'app.commands.install.command', + 'app.commands.preflight.command', + 'app.commands.proxy.command', + 'app.commands.service.command', + 'app.commands.test.command', + 'app.commands.uninstall.command', + 'app.commands.version.command', + ], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, +) + +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + [], + exclude_binaries=True, + name='nixopus', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) + +coll = COLLECT( + exe, + a.binaries, + a.zipfiles, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='nixopus' +) +EOF + + log_success "Spec file created: $SPEC_FILE" +} + +build_wheel() { + log_info "Building wheel package..." + + poetry build + + log_success "Wheel package built in $BUILD_DIR/" +} + +build_binary() { + log_info "Building binary" + + poetry run pyinstaller --clean --noconfirm $SPEC_FILE + + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + ARCH=$(uname -m) + + case $ARCH in + x86_64) ARCH="amd64" ;; + aarch64|arm64) ARCH="arm64" ;; + esac + + BINARY_DIR_NAME="${APP_NAME}_${OS}_${ARCH}" + + + if [[ -d "$BUILD_DIR/$APP_NAME" ]]; then + mv "$BUILD_DIR/$APP_NAME" "$BUILD_DIR/$BINARY_DIR_NAME" + + + cat > "$BUILD_DIR/$APP_NAME" << EOF +#!/bin/bash +# Nixopus CLI wrapper +SCRIPT_DIR="\$(cd "\$(dirname "\${BASH_SOURCE[0]}")" && pwd)" +exec "\$SCRIPT_DIR/$BINARY_DIR_NAME/$APP_NAME" "\$@" +EOF + chmod +x "$BUILD_DIR/$APP_NAME" + + log_success "Binary directory built: $BUILD_DIR/$BINARY_DIR_NAME/" + log_success "Wrapper script created: $BUILD_DIR/$APP_NAME" + else + log_error "Build failed - directory $BUILD_DIR/$APP_NAME not found" + exit 1 + fi +} + +test_binary() { + + log_info "Testing binary..." + + WRAPPER_PATH="$BUILD_DIR/$APP_NAME" + + if [[ -f "$WRAPPER_PATH" ]]; then + chmod +x "$WRAPPER_PATH" + + if "$WRAPPER_PATH" --version; then + log_success "Binary test passed" + else + log_error "Binary test failed" + exit 1 + fi + else + log_error "Wrapper script not found for testing: $WRAPPER_PATH" + exit 1 + fi +} + +create_release_archive() { + log_info "Creating release archive..." + + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + ARCH=$(uname -m) + + case $ARCH in + x86_64) ARCH="amd64" ;; + aarch64|arm64) ARCH="arm64" ;; + esac + + ARCHIVE_NAME="${APP_NAME}_${OS}_${ARCH}" + BINARY_DIR_NAME="${APP_NAME}_${OS}_${ARCH}" + + cd $BUILD_DIR + + + if [[ "$OS" == "darwin" || "$OS" == "linux" ]]; then + tar -czf "${ARCHIVE_NAME}.tar.gz" "$BINARY_DIR_NAME" "$APP_NAME" + log_success "Archive created: $BUILD_DIR/${ARCHIVE_NAME}.tar.gz" + elif [[ "$OS" == "mingw"* || "$OS" == "cygwin"* || "$OS" == "msys"* ]]; then + zip -r "${ARCHIVE_NAME}.zip" "$BINARY_DIR_NAME" "$APP_NAME" + log_success "Archive created: $BUILD_DIR/${ARCHIVE_NAME}.zip" + fi + + cd .. +} + +cleanup() { + log_info "Cleaning up temporary files..." 
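+    # Remove only PyInstaller's intermediate build/ directory and the generated
+    # spec file; the final artifacts under dist/ are kept.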
+ rm -rf build/ + rm -f $SPEC_FILE + log_success "Cleanup complete" +} + +show_usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --no-test Skip binary testing" + echo " --no-archive Skip creating release archive" + echo " --no-cleanup Skip cleanup of temporary files" + echo " --help Show this help message" + echo "" + echo "Example:" + echo " $0 # Full build with all steps" + echo " $0 --no-test # Build without testing" + echo " $0 --no-archive # Build without creating archive" +} + +main() { + local skip_test=false + local skip_archive=false + local skip_cleanup=false + + while [[ $# -gt 0 ]]; do + case $1 in + --no-test) + skip_test=true + shift + ;; + --no-archive) + skip_archive=true + shift + ;; + --no-cleanup) + skip_cleanup=true + shift + ;; + --help) + show_usage + exit 0 + ;; + *) + log_error "Unknown option: $1" + show_usage + exit 1 + ;; + esac + done + + log_info "Starting Nixopus CLI binary build process..." + + check_requirements + setup_environment + create_spec_file + build_wheel + build_binary + + if [[ $skip_test == false ]]; then + test_binary + fi + + if [[ $skip_archive == false ]]; then + create_release_archive + fi + + if [[ $skip_cleanup == false ]]; then + cleanup + fi + + log_success "Build process completed!" + log_info "Binary location: $BUILD_DIR/" + + if [[ -d "$BUILD_DIR" ]]; then + echo "" + log_info "Built binaries:" + ls -la $BUILD_DIR/ + fi +} + +main "$@" \ No newline at end of file diff --git a/cli/install.sh b/cli/install.sh new file mode 100755 index 00000000..a9a384fe --- /dev/null +++ b/cli/install.sh @@ -0,0 +1,247 @@ +#!/bin/bash + +set -e + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +APP_NAME="nixopus" +INSTALL_DIR="/usr/local/bin" +BUILD_DIR="dist" + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +show_usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --local Install to ~/.local/bin instead of /usr/local/bin" + echo " --dir DIR Install to custom directory" + echo " --no-path Don't automatically update PATH in shell profile" + echo " --help Show this help message" + echo "" + echo "Examples:" + echo " $0 # Install to /usr/local/bin (requires sudo)" + echo " $0 --local # Install to ~/.local/bin (no sudo required)" + echo " $0 --dir ~/bin # Install to custom directory" + echo " $0 --local --no-path # Install locally but don't update PATH" +} + +detect_binary() { + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + ARCH=$(uname -m) + + case $ARCH in + x86_64) ARCH="amd64" ;; + aarch64|arm64) ARCH="arm64" ;; + esac + + BINARY_NAME="${APP_NAME}_${OS}_${ARCH}" + + if [[ "$OS" == "mingw"* || "$OS" == "cygwin"* || "$OS" == "msys"* ]]; then + BINARY_NAME="${BINARY_NAME}.exe" + fi + + BINARY_PATH="$BUILD_DIR/$BINARY_NAME" + + if [[ ! -f "$BINARY_PATH" ]]; then + log_error "Binary not found: $BINARY_PATH" + log_info "Please run './build.sh' first to build the binary" + exit 1 + fi + + log_info "Found binary: $BINARY_PATH" +} + +install_binary() { + log_info "Installing $APP_NAME to $INSTALL_DIR..." + + if [[ ! -d "$INSTALL_DIR" ]]; then + log_info "Creating directory: $INSTALL_DIR" + mkdir -p "$INSTALL_DIR" + fi + + if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ $EUID -ne 0 ]]; then + log_info "Installing to system directory requires sudo..." 
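+        # /usr/local/bin is normally root-owned, so copy with sudo; other install
+        # directories fall through to the plain cp below.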
+ sudo cp "$BINARY_PATH" "$INSTALL_DIR/$APP_NAME" + sudo chmod +x "$INSTALL_DIR/$APP_NAME" + else + cp "$BINARY_PATH" "$INSTALL_DIR/$APP_NAME" + chmod +x "$INSTALL_DIR/$APP_NAME" + fi + + log_success "$APP_NAME installed to $INSTALL_DIR/$APP_NAME" +} + +update_shell_profile() { + shell_profile="" + local current_shell=$(basename "$SHELL") + + case $current_shell in + bash) + if [[ -f "$HOME/.bash_profile" ]]; then + shell_profile="$HOME/.bash_profile" + elif [[ -f "$HOME/.bashrc" ]]; then + shell_profile="$HOME/.bashrc" + else + shell_profile="$HOME/.bash_profile" + fi + ;; + zsh) + shell_profile="$HOME/.zshrc" + ;; + fish) + shell_profile="$HOME/.config/fish/config.fish" + ;; + *) + shell_profile="$HOME/.profile" + ;; + esac + + log_info "Detected shell: $current_shell" + log_info "Using profile: $shell_profile" + + return 0 +} + +add_to_path() { + if [[ ":$PATH:" == *":$INSTALL_DIR:"* ]]; then + log_success "$INSTALL_DIR is already in your PATH" + return 0 + fi + + update_shell_profile + local shell_profile_used=$shell_profile + + mkdir -p "$(dirname "$shell_profile_used")" + + if [[ -f "$shell_profile_used" ]] && grep -q "export PATH.*$INSTALL_DIR" "$shell_profile_used"; then + log_info "PATH entry already exists in $shell_profile_used" + return 0 + fi + + log_info "Adding $INSTALL_DIR to PATH in $shell_profile_used..." + + { + echo "" + echo "# Added by nixopus installer" + echo "export PATH=\"$INSTALL_DIR:\$PATH\"" + } >> "$shell_profile_used" + + log_success "Added $INSTALL_DIR to PATH in $shell_profile_used" + + log_info "Updating PATH for current session..." + export PATH="$INSTALL_DIR:$PATH" + log_success "PATH updated for current session" + + if [[ -f "$shell_profile_used" ]]; then + log_info "Sourcing $shell_profile_used for future sessions..." + source "$shell_profile_used" 2>/dev/null || true + fi + + return 0 +} + +test_installation() { + log_info "Testing installation..." + + if command -v "$APP_NAME" &> /dev/null; then + if "$APP_NAME" --version; then + log_success "Installation test passed!" + echo "" + log_info "You can now use: $APP_NAME --help" + log_info "The command is available in new shell sessions or by running:" + log_info " export PATH=\"$INSTALL_DIR:\$PATH\" && $APP_NAME --help" + else + log_error "Installation test failed - binary exists but doesn't work" + exit 1 + fi + else + log_warning "Command '$APP_NAME' not found in PATH" + log_info "You may need to restart your shell or update your PATH" + log_info "You can run directly: $INSTALL_DIR/$APP_NAME --help" + fi +} + +main() { + local use_local=false + local custom_dir="" + local skip_path=false + + while [[ $# -gt 0 ]]; do + case $1 in + --local) + use_local=true + shift + ;; + --dir) + custom_dir="$2" + shift 2 + ;; + --no-path) + skip_path=true + shift + ;; + --help) + show_usage + exit 0 + ;; + *) + log_error "Unknown option: $1" + show_usage + exit 1 + ;; + esac + done + + if [[ -n "$custom_dir" ]]; then + INSTALL_DIR="$custom_dir" + elif [[ "$use_local" == true ]]; then + INSTALL_DIR="$HOME/.local/bin" + fi + + log_info "Starting $APP_NAME installation..." + log_info "Target directory: $INSTALL_DIR" + + detect_binary + install_binary + + if [[ "$skip_path" == false ]]; then + add_to_path + else + log_info "Skipping PATH update (--no-path specified)" + if [[ ":$PATH:" != *":$INSTALL_DIR:"* ]]; then + log_warning "$INSTALL_DIR is not in your PATH" + log_info "You can run: $INSTALL_DIR/$APP_NAME --help" + fi + fi + + test_installation + + log_success "Installation completed!" 
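+    # Print copy-paste instructions so the binary can be used without restarting the shell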
+ echo "" + log_info "To use nixopus immediately in this session:" + echo " export PATH=\"$INSTALL_DIR:\$PATH\"" + echo " nixopus --help" + echo "" + log_info "Or open a new shell session and run: nixopus --help" +} + +main "$@" \ No newline at end of file diff --git a/cli/perf_check.sh b/cli/perf_check.sh new file mode 100644 index 00000000..16a367d6 --- /dev/null +++ b/cli/perf_check.sh @@ -0,0 +1,16 @@ +# TODO: @shravan20 - Delte before merging to master or feat/dev +echo "itr \tBinary RUN\t Poetry Run" + +x=() +y=() + + +for i in {1..3}; do + x[$i]=$( (time ./dist/nixopus --help > /dev/null 2>&1) 2>&1 | grep real | awk '{print $2}' ) + echo "$i\t${x[$i]}\t\t-" +done + +for i in {1..3}; do + y[$i]=$( (time poetry run nixopus --help > /dev/null 2>&1) 2>&1 | grep real | awk '{print $2}' ) + echo "$i\t-\t\t ${y[$i]}" +done \ No newline at end of file diff --git a/cli/pyproject.toml b/cli/pyproject.toml new file mode 100644 index 00000000..bc908350 --- /dev/null +++ b/cli/pyproject.toml @@ -0,0 +1,66 @@ +[tool.poetry] +name = "nixopus" +version = "0.1.0" +description = "A CLI for Nixopus" +authors = ["Nixopus "] +readme = "README.md" +packages = [{include = "app"}] + +[tool.poetry.dependencies] +python = ">=3.9.0,<3.14" +typer = "^0.16.0" +rich = "^14.0.0" +pydantic = "^2.0.0" +packaging = "^23.0" +requests = "^2.32.4" +pyyaml = "^6.0.2" + +[tool.poetry.group.dev.dependencies] +pytest = "^8.4.1" +pytest-cov = "^6.2.1" +pytest-watch = "^4.2.0" +flake8 = "^7.3.0" +black = "^25.1.0" +isort = "^6.0.1" +pyinstaller = "^6.14.2" + +[tool.poetry.scripts] +nixopus = "app.main:app" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = "-v --cov=app --cov-report=term-missing" + +[tool.black] +line-length = 127 +target-version = ['py38'] +include = '\.pyi?$' +extend-exclude = ''' +/( + # directories + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | build + | dist +)/ +''' + +[tool.isort] +profile = "black" +line_length = 127 +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true diff --git a/cli/tests/__init__.py b/cli/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/__init__.py b/cli/tests/commands/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/clone/__init__.py b/cli/tests/commands/clone/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/clone/test_clone.py b/cli/tests/commands/clone/test_clone.py new file mode 100644 index 00000000..db6b257e --- /dev/null +++ b/cli/tests/commands/clone/test_clone.py @@ -0,0 +1,455 @@ +import subprocess +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.clone.clone import ( + Clone, + CloneConfig, + CloneFormatter, + CloneResult, + CloneService, + GitClone, + GitCommandBuilder, +) +from app.commands.clone.messages import ( + successfully_cloned, + dry_run_mode, + dry_run_command, + dry_run_force_mode, + path_exists_will_overwrite, + path_exists_would_fail, +) +from app.utils.lib import DirectoryManager +from app.utils.logger import Logger + + +class TestGitCommandBuilder: + def test_build_clone_command_without_branch(self): + cmd = 
GitCommandBuilder.build_clone_command("https://github.com/user/repo", "/path/to/clone") + assert cmd == ["git", "clone", "https://github.com/user/repo", "/path/to/clone"] + + def test_build_clone_command_with_branch(self): + cmd = GitCommandBuilder.build_clone_command("https://github.com/user/repo", "/path/to/clone", "main") + assert cmd == ["git", "clone", "-b", "main", "https://github.com/user/repo", "/path/to/clone"] + + def test_build_clone_command_with_empty_branch(self): + cmd = GitCommandBuilder.build_clone_command("https://github.com/user/repo", "/path/to/clone", "") + assert cmd == ["git", "clone", "https://github.com/user/repo", "/path/to/clone"] + + +class TestCloneFormatter: + def setup_method(self): + self.formatter = CloneFormatter() + + def test_format_output_success(self): + result = CloneResult( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="text", + success=True, + ) + formatted = self.formatter.format_output(result, "text") + assert successfully_cloned.format(repo="https://github.com/user/repo", path="/path/to/clone") in formatted + + def test_format_output_failure(self): + result = CloneResult( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="text", + success=False, + error="Repository not found", + ) + formatted = self.formatter.format_output(result, "text") + assert "Error: Repository not found" in formatted + + def test_format_output_json(self): + result = CloneResult( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="json", + success=True, + ) + formatted = self.formatter.format_output(result, "json") + import json + + data = json.loads(formatted) + assert data["success"] is True + assert data["message"] == successfully_cloned.format(repo="https://github.com/user/repo", path="/path/to/clone") + + def test_format_output_invalid(self): + result = CloneResult( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="invalid", + success=True, + ) + with pytest.raises(ValueError): + self.formatter.format_output(result, "invalid") + + @patch("os.path.exists") + def test_format_dry_run(self, mock_exists): + mock_exists.return_value = False + config = CloneConfig( + repo="https://github.com/user/repo", path="/path/to/clone", branch="main", force=True, dry_run=True + ) + formatted = self.formatter.format_dry_run(config) + assert dry_run_mode in formatted + assert dry_run_command.format(command="git clone -b main https://github.com/user/repo /path/to/clone") in formatted + assert dry_run_force_mode.format(force=True) in formatted + + @patch("os.path.exists") + def test_format_dry_run_path_exists_force(self, mock_exists): + mock_exists.return_value = True + config = CloneConfig( + repo="https://github.com/user/repo", path="/path/to/clone", branch="main", force=True, dry_run=True + ) + formatted = self.formatter.format_dry_run(config) + assert path_exists_will_overwrite.format(path="/path/to/clone") in formatted + + @patch("os.path.exists") + def test_format_dry_run_path_exists_no_force(self, mock_exists): + mock_exists.return_value = True + config = CloneConfig( + repo="https://github.com/user/repo", path="/path/to/clone", branch="main", force=False, dry_run=True + ) + formatted = self.formatter.format_dry_run(config) + assert path_exists_would_fail.format(path="/path/to/clone") in formatted + 
+ +class TestGitClone: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.git_clone = GitClone(self.logger) + + @patch("subprocess.run") + def test_clone_repository_success(self, mock_run): + mock_run.return_value = Mock(returncode=0) + + success, error = self.git_clone.clone_repository("https://github.com/user/repo", "/path/to/clone", "main") + + assert success is True + assert error is None + self.logger.debug.assert_called() + + @patch("subprocess.run") + def test_clone_repository_without_branch(self, mock_run): + mock_run.return_value = Mock(returncode=0) + + success, error = self.git_clone.clone_repository("https://github.com/user/repo", "/path/to/clone") + + assert success is True + assert error is None + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["git", "clone", "https://github.com/user/repo", "/path/to/clone"] + + @patch("subprocess.run") + def test_clone_repository_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "git clone", stderr="Repository not found") + + success, error = self.git_clone.clone_repository("https://github.com/user/repo", "/path/to/clone") + + assert success is False + assert error == "Repository not found" + + @patch("subprocess.run") + def test_clone_repository_unexpected_error(self, mock_run): + mock_run.side_effect = Exception("Unexpected error") + + success, error = self.git_clone.clone_repository("https://github.com/user/repo", "/path/to/clone") + + assert success is False + assert error == "Unexpected error" + + +class TestCloneConfig: + def test_valid_config(self): + config = CloneConfig(repo="https://github.com/user/repo", path="/path/to/clone", branch="main") + assert config.repo == "https://github.com/user/repo" + assert config.path == "/path/to/clone" + assert config.branch == "main" + assert config.force is False + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + + def test_valid_repo_formats(self): + valid_repos = [ + "https://github.com/user/repo", + "http://github.com/user/repo", + "git://github.com/user/repo", + "ssh://github.com/user/repo", + "git@github.com:user/repo.git", + "https://github.com/user/repo.git", + ] + + for repo in valid_repos: + config = CloneConfig(repo=repo, path="/path/to/clone") + assert config.repo == repo + + def test_invalid_repo_formats(self): + invalid_repos = ["", " ", "github.com:user/repo", "invalid://github.com/user/repo"] + + for repo in invalid_repos: + with pytest.raises(ValidationError): + CloneConfig(repo=repo, path="/path/to/clone") + + def test_empty_repo(self): + with pytest.raises(ValidationError): + CloneConfig(repo="", path="/path/to/clone") + + def test_empty_path(self): + with pytest.raises(ValidationError): + CloneConfig(repo="https://github.com/user/repo", path="") + + def test_branch_validation(self): + config = CloneConfig(repo="https://github.com/user/repo", path="/path/to/clone", branch=" ") + assert config.branch is None + + def test_is_valid_repo_format(self): + valid_repos = [ + "https://github.com/user/repo", + "http://github.com/user/repo", + "git@github.com:user/repo.git", + "https://github.com/user/repo.git", + ] + + for repo in valid_repos: + assert CloneConfig._is_valid_repo_format(repo) is True + + invalid_repos = ["github.com:user/repo", "invalid://github.com/user/repo"] + + for repo in invalid_repos: + assert CloneConfig._is_valid_repo_format(repo) is False + + +class TestDirectoryManager: + def setup_method(self): + self.logger = Mock(spec=Logger) + 
+ @patch("shutil.rmtree") + def test_remove_directory_success(self, mock_rmtree): + success = DirectoryManager.remove_directory("/path/to/remove", self.logger) + + assert success is True + mock_rmtree.assert_called_once_with("/path/to/remove") + self.logger.debug.assert_called() + + @patch("shutil.rmtree") + def test_remove_directory_failure(self, mock_rmtree): + mock_rmtree.side_effect = Exception("Permission denied") + + success = DirectoryManager.remove_directory("/path/to/remove", self.logger) + + assert success is False + self.logger.error.assert_called_once() + + @patch("os.path.exists") + def test_path_exists_and_not_force_true(self, mock_exists): + mock_exists.return_value = True + + result = DirectoryManager.path_exists_and_not_force("/path/to/check", False) + + assert result is True + + @patch("os.path.exists") + def test_path_exists_and_not_force_false_when_force(self, mock_exists): + mock_exists.return_value = True + + result = DirectoryManager.path_exists_and_not_force("/path/to/check", True) + + assert result is False + + @patch("os.path.exists") + def test_path_exists_and_not_force_false_when_not_exists(self, mock_exists): + mock_exists.return_value = False + + result = DirectoryManager.path_exists_and_not_force("/path/to/check", False) + + assert result is False + + +class TestCloneService: + def setup_method(self): + self.config = CloneConfig(repo="https://github.com/user/repo", path="/path/to/clone", branch="main") + self.logger = Mock(spec=Logger) + self.cloner = Mock(spec=GitClone) + self.service = CloneService(self.config, self.logger, self.cloner) + + def test_create_result_success(self): + result = self.service._create_result(True) + + assert result.repo == self.config.repo + assert result.path == self.config.path + assert result.branch == self.config.branch + assert result.success is True + assert result.error is None + + def test_create_result_failure(self): + result = self.service._create_result(False, "Test error") + + assert result.success is False + assert result.error == "Test error" + + @patch("os.path.exists") + def test_validate_prerequisites_success(self, mock_exists): + mock_exists.return_value = False + + result = self.service._validate_prerequisites() + + assert result is True + + @patch("os.path.exists") + def test_validate_prerequisites_path_exists_no_force(self, mock_exists): + mock_exists.return_value = True + + result = self.service._validate_prerequisites() + + assert result is False + self.logger.error.assert_called_once() + + @patch("os.path.exists") + def test_prepare_target_directory_force_success(self, mock_exists): + self.service.config.force = True + mock_exists.return_value = True + self.service.dir_manager.remove_directory = Mock(return_value=True) + + result = self.service._prepare_target_directory() + + assert result is True + self.service.dir_manager.remove_directory.assert_called_once_with(self.config.path, self.logger) + + @patch("os.path.exists") + def test_prepare_target_directory_force_failure(self, mock_exists): + self.service.config.force = True + mock_exists.return_value = True + self.service.dir_manager.remove_directory = Mock(return_value=False) + + result = self.service._prepare_target_directory() + + assert result is False + self.service.dir_manager.remove_directory.assert_called_once_with(self.config.path, self.logger) + + def test_clone_success(self): + self.cloner.clone_repository.return_value = (True, None) + + result = self.service.clone() + + assert result.success is True + 
self.cloner.clone_repository.assert_called_once_with(self.config.repo, self.config.path, self.config.branch) + + def test_clone_failure(self): + self.cloner.clone_repository.return_value = (False, "Test error") + + result = self.service.clone() + + assert result.success is False + assert result.error == "Test error" + + def test_clone_and_format_dry_run(self): + self.config.dry_run = True + + result = self.service.clone_and_format() + + assert dry_run_mode in result + + def test_clone_and_format_success(self): + self.cloner.clone_repository.return_value = (True, None) + + result = self.service.clone_and_format() + + assert successfully_cloned.format(repo=self.config.repo, path=self.config.path) in result + + +class TestClone: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.clone = Clone(self.logger) + + def test_clone_success(self): + config = CloneConfig(repo="https://github.com/user/repo", path="/path/to/clone", branch="main") + + with patch.object(CloneService, "clone") as mock_clone: + mock_result = CloneResult( + repo=config.repo, + path=config.path, + branch=config.branch, + force=config.force, + verbose=config.verbose, + output=config.output, + success=True, + ) + mock_clone.return_value = mock_result + + result = self.clone.clone(config) + + assert result.success is True + + def test_format_output(self): + result = CloneResult( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="text", + success=True, + ) + + formatted = self.clone.format_output(result, "text") + + assert successfully_cloned.format(repo="https://github.com/user/repo", path="/path/to/clone") in formatted + + def test_clone_and_format(self): + config = CloneConfig( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=False, + output="text", + dry_run=True, + ) + + with patch.object(CloneService, "clone_and_format") as mock_clone_and_format: + mock_clone_and_format.return_value = dry_run_mode + + formatted = self.clone.clone_and_format(config) + + assert dry_run_mode in formatted + + def test_debug_logging_enabled(self): + """Test that debug logging is properly enabled when verbose=True""" + config = CloneConfig( + repo="https://github.com/user/repo", + path="/path/to/clone", + branch="main", + force=False, + verbose=True, + output="text", + dry_run=False, + ) + + logger = Mock(spec=Logger) + clone_operation = Clone(logger=logger) + + # Patch only GitClone.clone_repository to simulate a successful clone + with patch("app.commands.clone.clone.GitClone.clone_repository", return_value=(True, None)): + result = clone_operation.clone(config) + + # Verify that debug logging was called + assert logger.debug.called diff --git a/cli/tests/commands/conf/__init__.py b/cli/tests/commands/conf/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/conf/test_base.py b/cli/tests/commands/conf/test_base.py new file mode 100644 index 00000000..8023dce2 --- /dev/null +++ b/cli/tests/commands/conf/test_base.py @@ -0,0 +1,416 @@ +import os +import shutil +import tempfile +from unittest.mock import Mock, mock_open, patch + +import pytest +from pydantic import ValidationError + +from app.commands.conf.base import BaseAction, BaseConfig, BaseEnvironmentManager, BaseResult, BaseService +from app.utils.logger import Logger + + +class TestBaseEnvironmentManager: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.manager = 
BaseEnvironmentManager(self.logger) + + @patch("os.path.exists") + def test_read_env_file_exists(self, mock_exists): + mock_exists.return_value = True + + with patch("builtins.open", mock_open(read_data="KEY1=value1\nKEY2=value2\n")): + success, config, error = self.manager.read_env_file("/path/to/.env") + + assert success is True + assert config == {"KEY1": "value1", "KEY2": "value2"} + assert error is None + + @patch("os.path.exists") + def test_read_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + + success, config, error = self.manager.read_env_file("/path/to/.env") + + assert success is False + assert config == {} + assert "Environment file not found" in error + + @patch("os.path.exists") + def test_read_env_file_with_comments_and_empty_lines(self, mock_exists): + mock_exists.return_value = True + + content = "# Comment line\nKEY1=value1\n\nKEY2=value2\n# Another comment" + with patch("builtins.open", mock_open(read_data=content)): + success, config, error = self.manager.read_env_file("/path/to/.env") + + assert success is True + assert config == {"KEY1": "value1", "KEY2": "value2"} + assert error is None + + @patch("os.path.exists") + def test_read_env_file_with_invalid_line(self, mock_exists): + mock_exists.return_value = True + + content = "KEY1=value1\nINVALID_LINE\nKEY2=value2" + with patch("builtins.open", mock_open(read_data=content)): + success, config, error = self.manager.read_env_file("/path/to/.env") + + assert success is True + assert config == {"KEY1": "value1", "KEY2": "value2"} + assert error is None + self.logger.warning.assert_called_once() + + @patch("os.path.exists") + def test_create_backup_file_exists(self, mock_exists): + mock_exists.return_value = True + + with patch("shutil.copy2") as mock_copy: + success, backup_path, error = self.manager._create_backup("/path/to/.env") + + assert success is True + assert backup_path == "/path/to/.env.backup" + assert error is None + mock_copy.assert_called_once_with("/path/to/.env", "/path/to/.env.backup") + + @patch("os.path.exists") + def test_create_backup_file_not_exists(self, mock_exists): + mock_exists.return_value = False + + success, backup_path, error = self.manager._create_backup("/path/to/.env") + + assert success is True + assert backup_path is None + assert error is None + + @patch("os.path.exists") + def test_create_backup_failure(self, mock_exists): + mock_exists.return_value = True + + with patch("shutil.copy2", side_effect=Exception("Copy failed")): + success, backup_path, error = self.manager._create_backup("/path/to/.env") + + assert success is False + assert backup_path is None + assert "Failed to create backup" in error + + @patch("os.path.exists") + def test_restore_backup_success(self, mock_exists): + mock_exists.return_value = True + + with patch("shutil.copy2") as mock_copy: + with patch("os.remove") as mock_remove: + success, error = self.manager._restore_backup("/path/to/.env.backup", "/path/to/.env") + + assert success is True + assert error is None + mock_copy.assert_called_once_with("/path/to/.env.backup", "/path/to/.env") + mock_remove.assert_called_once_with("/path/to/.env.backup") + + @patch("os.path.exists") + def test_restore_backup_not_exists(self, mock_exists): + mock_exists.return_value = False + + success, error = self.manager._restore_backup("/path/to/.env.backup", "/path/to/.env") + + assert success is False + assert error == "Backup file not found" + + @patch("os.path.exists") + def test_restore_backup_failure(self, mock_exists): + mock_exists.return_value 
= True + + with patch("shutil.copy2", side_effect=Exception("Copy failed")): + success, error = self.manager._restore_backup("/path/to/.env.backup", "/path/to/.env") + + assert success is False + assert "Failed to restore from backup" in error + + @patch("os.makedirs") + @patch("tempfile.NamedTemporaryFile") + @patch("os.replace") + @patch("os.fsync") + def test_atomic_write_success(self, mock_fsync, mock_replace, mock_tempfile, mock_makedirs): + config = {"KEY2": "value2", "KEY1": "value1"} + + mock_temp = Mock() + mock_temp.name = "/tmp/temp_file" + mock_temp.fileno.return_value = 123 + mock_tempfile.return_value.__enter__.return_value = mock_temp + mock_tempfile.return_value.__exit__.return_value = None + + success, error = self.manager._atomic_write("/path/to/.env", config) + + assert success is True + assert error is None + mock_makedirs.assert_called_once_with("/path/to", exist_ok=True) + mock_temp.write.assert_called() + mock_temp.flush.assert_called_once() + mock_temp.fileno.assert_called_once() + mock_replace.assert_called_once_with("/tmp/temp_file", "/path/to/.env") + + @patch("os.makedirs") + @patch("tempfile.NamedTemporaryFile") + def test_atomic_write_failure(self, mock_tempfile, mock_makedirs): + config = {"KEY1": "value1"} + + mock_tempfile.side_effect = Exception("Temp file creation failed") + + success, error = self.manager._atomic_write("/path/to/.env", config) + + assert success is False + assert "Failed to write environment file" in error + + @patch("os.makedirs") + @patch("tempfile.NamedTemporaryFile") + @patch("os.replace") + @patch("os.fsync") + def test_atomic_write_simple(self, mock_fsync, mock_replace, mock_tempfile, mock_makedirs): + config = {"KEY1": "value1"} + + mock_temp = Mock() + mock_temp.name = "/tmp/temp_file" + mock_temp.fileno.return_value = 123 + mock_tempfile.return_value.__enter__.return_value = mock_temp + mock_tempfile.return_value.__exit__.return_value = None + + success, error = self.manager._atomic_write("/path/to/.env", config) + + assert success is True + assert error is None + + @patch("os.path.exists") + @patch("shutil.copy2") + @patch("tempfile.NamedTemporaryFile") + @patch("os.replace") + @patch("os.fsync") + @patch("os.makedirs") + def test_write_env_file_success_with_backup( + self, mock_makedirs, mock_fsync, mock_replace, mock_tempfile, mock_copy, mock_exists + ): + mock_exists.return_value = True + config = {"KEY2": "value2", "KEY1": "value1"} + + mock_temp = Mock() + mock_temp.name = "/tmp/temp_file" + mock_temp.fileno.return_value = 123 + mock_tempfile.return_value.__enter__.return_value = mock_temp + mock_tempfile.return_value.__exit__.return_value = None + + with patch("os.remove") as mock_remove: + success, error = self.manager.write_env_file("/path/to/.env", config) + + assert success is True + assert error is None + mock_copy.assert_called_once_with("/path/to/.env", "/path/to/.env.backup") + mock_remove.assert_called_once_with("/path/to/.env.backup") + self.logger.debug.assert_called() + + @patch("os.path.exists") + @patch("tempfile.NamedTemporaryFile") + @patch("os.replace") + @patch("os.fsync") + @patch("os.makedirs") + def test_write_env_file_success_no_backup_needed( + self, mock_makedirs, mock_fsync, mock_replace, mock_tempfile, mock_exists + ): + mock_exists.return_value = False + config = {"KEY1": "value1"} + + mock_temp = Mock() + mock_temp.name = "/tmp/temp_file" + mock_temp.fileno.return_value = 123 + mock_tempfile.return_value.__enter__.return_value = mock_temp + mock_tempfile.return_value.__exit__.return_value = 
None + + success, error = self.manager.write_env_file("/path/to/.env", config) + + assert success is True + assert error is None + mock_replace.assert_called_once_with("/tmp/temp_file", "/path/to/.env") + + @patch("os.path.exists") + @patch("shutil.copy2") + def test_write_env_file_backup_failure(self, mock_copy, mock_exists): + mock_exists.return_value = True + mock_copy.side_effect = Exception("Backup failed") + config = {"KEY1": "value1"} + + success, error = self.manager.write_env_file("/path/to/.env", config) + + assert success is False + assert "Failed to create backup" in error + + @patch("os.path.exists") + @patch("shutil.copy2") + @patch("tempfile.NamedTemporaryFile") + def test_write_env_file_write_failure_with_restore(self, mock_tempfile, mock_copy, mock_exists): + mock_exists.return_value = True + config = {"KEY1": "value1"} + + mock_tempfile.side_effect = Exception("Write failed") + + with patch.object(self.manager, "_restore_backup") as mock_restore: + mock_restore.return_value = (True, None) + + success, error = self.manager.write_env_file("/path/to/.env", config) + + assert success is False + assert "Failed to write environment file" in error + mock_restore.assert_called_once_with("/path/to/.env.backup", "/path/to/.env") + self.logger.warning.assert_called() + self.logger.debug.assert_called() + + def test_get_service_env_file_with_custom_env_file(self): + env_file = self.manager.get_service_env_file("api", "/custom/.env") + assert env_file == "/custom/.env" + + def test_get_service_env_file_api_service(self): + env_file = self.manager.get_service_env_file("api") + assert env_file == "/etc/nixopus/source/api/.env" + + def test_get_service_env_file_view_service(self): + env_file = self.manager.get_service_env_file("view") + assert env_file == "/etc/nixopus/source/view/.env" + + def test_get_service_env_file_invalid_service(self): + with pytest.raises(ValueError, match="Invalid service: invalid"): + self.manager.get_service_env_file("invalid") + + +class TestBaseConfig: + def test_valid_config_default(self): + config = BaseConfig() + assert config.service == "api" + assert config.key is None + assert config.value is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.env_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = BaseConfig( + service="view", + key="TEST_KEY", + value="test_value", + verbose=True, + output="json", + dry_run=True, + env_file="/path/to/.env", + ) + assert config.service == "view" + assert config.key == "TEST_KEY" + assert config.value == "test_value" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = BaseConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + BaseConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = BaseConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = BaseConfig(env_file="") + assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = BaseConfig(env_file=" 
") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = BaseConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + +class TestBaseResult: + def test_base_result_default(self): + result = BaseResult(service="api", verbose=False, output="text") + assert result.service == "api" + assert result.key is None + assert result.value is None + assert result.config == {} + assert result.verbose is False + assert result.output == "text" + assert result.success is False + assert result.error is None + + def test_base_result_custom(self): + result = BaseResult( + service="view", + key="TEST_KEY", + value="test_value", + config={"KEY1": "value1"}, + verbose=True, + output="json", + success=True, + error="test error", + ) + assert result.service == "view" + assert result.key == "TEST_KEY" + assert result.value == "test_value" + assert result.config == {"KEY1": "value1"} + assert result.verbose is True + assert result.output == "json" + assert result.success is True + assert result.error == "test error" + + +class TestBaseService: + def setup_method(self): + self.config = BaseConfig() + self.logger = Mock(spec=Logger) + self.environment_service = Mock() + + def test_base_service_init(self): + service = BaseService(self.config, self.logger, self.environment_service) + assert service.config == self.config + assert service.logger == self.logger + assert service.environment_service == self.environment_service + assert service.formatter is None + + def test_base_service_init_defaults(self): + service = BaseService(self.config) + assert service.config == self.config + assert service.logger is not None + assert service.environment_service is None + assert service.formatter is None + + +class TestBaseAction: + def setup_method(self): + self.logger = Mock(spec=Logger) + + def test_base_action_init(self): + action = BaseAction(self.logger) + assert action.logger == self.logger + assert action.formatter is None + + def test_base_action_init_default(self): + action = BaseAction() + assert action.logger is None + assert action.formatter is None + + +def mock_open(read_data=""): + """Helper function to create a mock open function""" + from unittest.mock import mock_open as _mock_open + + return _mock_open(read_data=read_data) diff --git a/cli/tests/commands/conf/test_delete.py b/cli/tests/commands/conf/test_delete.py new file mode 100644 index 00000000..8a20e8ff --- /dev/null +++ b/cli/tests/commands/conf/test_delete.py @@ -0,0 +1,285 @@ +import json +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.conf.delete import Delete, DeleteConfig, DeleteResult, DeleteService, EnvironmentManager +from app.commands.conf.messages import ( + configuration_delete_failed, + configuration_deleted, + dry_run_delete_config, + dry_run_mode, + end_dry_run, + key_required_delete, +) +from app.utils.logger import Logger + + +class TestEnvironmentManager: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.logger.verbose = False # Add verbose attribute to mock + self.manager = EnvironmentManager(self.logger) + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + @patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") + def test_delete_config_success(self, mock_write_env_file, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1", "KEY2": "value2"}, None) + 
mock_write_env_file.return_value = (True, None) + + success, error = self.manager.delete_config("api", "KEY1") + + assert success is True + assert error is None + mock_read_env_file.assert_called_once_with("/etc/nixopus/source/api/.env") + mock_write_env_file.assert_called_once_with("/etc/nixopus/source/api/.env", {"KEY2": "value2"}) + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + def test_delete_config_read_failure(self, mock_read_env_file): + mock_read_env_file.return_value = (False, {}, "File not found") + + success, error = self.manager.delete_config("api", "KEY1") + + assert success is False + assert error == "File not found" + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + def test_delete_config_key_not_found(self, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1"}, None) + + success, error = self.manager.delete_config("api", "KEY2") + + assert success is False + assert "Configuration key 'KEY2' not found" in error + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + @patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") + def test_delete_config_write_failure(self, mock_write_env_file, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1"}, None) + mock_write_env_file.return_value = (False, "Write error") + + success, error = self.manager.delete_config("api", "KEY1") + + assert success is False + assert error == "Write error" + + @patch("app.commands.conf.base.BaseEnvironmentManager.get_service_env_file") + def test_delete_config_with_custom_env_file(self, mock_get_service_env_file): + mock_get_service_env_file.return_value = "/custom/.env" + + with patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") as mock_read: + with patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") as mock_write: + mock_read.return_value = (True, {"KEY1": "value1"}, None) + mock_write.return_value = (True, None) + + self.manager.delete_config("api", "KEY1", "/custom/.env") + + mock_get_service_env_file.assert_called_once_with("api", "/custom/.env") + + +class TestDeleteConfig: + def test_valid_config_default(self): + config = DeleteConfig(key="TEST_KEY") + assert config.service == "api" + assert config.key == "TEST_KEY" + assert config.value is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.env_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DeleteConfig( + service="view", key="TEST_KEY", verbose=True, output="json", dry_run=True, env_file="/path/to/.env" + ) + assert config.service == "view" + assert config.key == "TEST_KEY" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.env_file == "/path/to/.env" + + +class TestDeleteResult: + def test_delete_result_default(self): + result = DeleteResult(service="api", key="TEST_KEY", verbose=False, output="text") + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.value is None + assert result.config == {} + assert result.verbose is False + assert result.output == "text" + assert result.success is False + assert result.error is None + + def test_delete_result_success(self): + result = DeleteResult( + service="view", key="TEST_KEY", config={"KEY1": "value1"}, verbose=True, output="json", success=True + ) + assert 
result.service == "view" + assert result.key == "TEST_KEY" + assert result.config == {"KEY1": "value1"} + assert result.verbose is True + assert result.output == "json" + assert result.success is True + + +class TestDeleteService: + def setup_method(self): + self.config = DeleteConfig(key="TEST_KEY") + self.logger = Mock(spec=Logger) + self.environment_service = Mock() + self.service = DeleteService(self.config, self.logger, self.environment_service) + + def test_delete_service_init(self): + assert self.service.config == self.config + assert self.service.logger == self.logger + assert self.service.environment_service == self.environment_service + + def test_delete_service_init_defaults(self): + service = DeleteService(self.config) + assert service.config == self.config + assert service.logger is not None + assert service.environment_service is not None + + def test_create_result_success(self): + result = self.service._create_result(True, config_dict={"KEY1": "value1"}) + + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.config == {"KEY1": "value1"} + assert result.success is True + assert result.error is None + + def test_create_result_failure(self): + result = self.service._create_result(False, error="Test error") + + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.config == {} + assert result.success is False + assert result.error == "Test error" + + def test_delete_missing_key(self): + self.config.key = None + + result = self.service.delete() + + assert result.success is False + assert result.error == key_required_delete + + def test_delete_success(self): + self.environment_service.delete_config.return_value = (True, None) + + result = self.service.delete() + + assert result.success is True + assert result.error is None + self.environment_service.delete_config.assert_called_once_with("api", "TEST_KEY", None) + + def test_delete_failure(self): + self.environment_service.delete_config.return_value = (False, "Delete error") + + result = self.service.delete() + + assert result.success is False + assert result.error == "Delete error" + + def test_delete_dry_run(self): + self.config.dry_run = True + + result = self.service.delete() + + assert result.success is True + assert result.error is None + self.environment_service.delete_config.assert_not_called() + + def test_delete_and_format_success(self): + self.environment_service.delete_config.return_value = (True, None) + + output = self.service.delete_and_format() + + assert configuration_deleted.format(service="api", key="TEST_KEY") in output + + def test_delete_and_format_failure(self): + self.environment_service.delete_config.return_value = (False, "Delete error") + + output = self.service.delete_and_format() + + assert configuration_delete_failed.format(service="api", error="Delete error") in output + + def test_delete_and_format_dry_run(self): + self.config.dry_run = True + + output = self.service.delete_and_format() + + assert dry_run_mode in output + assert dry_run_delete_config.format(service="api", key="TEST_KEY") in output + assert end_dry_run in output + + def test_format_output_json(self): + result = DeleteResult(service="api", key="TEST_KEY", success=True, verbose=False, output="json") + + output = self.service._format_output(result, "json") + data = json.loads(output) + + assert data["service"] == "api" + assert data["key"] == "TEST_KEY" + assert data["success"] is True + + def test_format_output_text_success(self): + result = DeleteResult(service="api", 
key="TEST_KEY", success=True, verbose=False, output="text") + + output = self.service._format_output(result, "text") + + assert configuration_deleted.format(service="api", key="TEST_KEY") in output + + def test_format_output_text_failure(self): + result = DeleteResult(service="api", key="TEST_KEY", success=False, error="Test error", verbose=False, output="text") + + output = self.service._format_output(result, "text") + + assert configuration_delete_failed.format(service="api", error="Test error") in output + + +class TestDelete: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.action = Delete(self.logger) + + def test_delete_action_init(self): + assert self.action.logger == self.logger + + def test_delete_action_init_default(self): + action = Delete() + assert action.logger is None + + def test_delete_success(self): + config = DeleteConfig(key="TEST_KEY") + + with patch("app.commands.conf.delete.DeleteService") as mock_service_class: + mock_service = Mock() + mock_service.execute.return_value = DeleteResult( + service="api", key="TEST_KEY", success=True, verbose=False, output="text" + ) + mock_service_class.return_value = mock_service + + result = self.action.delete(config) + + assert result.success is True + assert result.key == "TEST_KEY" + + def test_format_output(self): + result = DeleteResult(service="api", key="TEST_KEY", success=True, verbose=False, output="text") + + with patch("app.commands.conf.delete.DeleteService") as mock_service_class: + mock_service = Mock() + mock_service._format_output.return_value = "formatted output" + mock_service_class.return_value = mock_service + + output = self.action.format_output(result, "text") + + assert output == "formatted output" diff --git a/cli/tests/commands/conf/test_list.py b/cli/tests/commands/conf/test_list.py new file mode 100644 index 00000000..e6bc0c46 --- /dev/null +++ b/cli/tests/commands/conf/test_list.py @@ -0,0 +1,263 @@ +import json +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.conf.list import EnvironmentManager, List, ListConfig, ListResult, ListService +from app.commands.conf.messages import ( + configuration_list_failed, + configuration_listed, + dry_run_list_config, + dry_run_mode, + end_dry_run, + no_configuration_found, +) +from app.utils.logger import Logger + + +class TestEnvironmentManager: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.logger.verbose = False # Add verbose attribute to mock + self.manager = EnvironmentManager(self.logger) + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + def test_list_config_success(self, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1", "KEY2": "value2"}, None) + + success, config, error = self.manager.list_config("api") + + assert success is True + assert config == {"KEY1": "value1", "KEY2": "value2"} + assert error is None + mock_read_env_file.assert_called_once_with("/etc/nixopus/source/api/.env") + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + def test_list_config_failure(self, mock_read_env_file): + mock_read_env_file.return_value = (False, {}, "File not found") + + success, config, error = self.manager.list_config("api") + + assert success is False + assert config == {} + assert error == "File not found" + + @patch("app.commands.conf.base.BaseEnvironmentManager.get_service_env_file") + def test_list_config_with_custom_env_file(self, mock_get_service_env_file): + 
mock_get_service_env_file.return_value = "/custom/.env" + + self.manager.list_config("api", "/custom/.env") + + mock_get_service_env_file.assert_called_once_with("api", "/custom/.env") + + +class TestListConfig: + def test_valid_config_default(self): + config = ListConfig() + assert config.service == "api" + assert config.key is None + assert config.value is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.env_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = ListConfig(service="view", verbose=True, output="json", dry_run=True, env_file="/path/to/.env") + assert config.service == "view" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.env_file == "/path/to/.env" + + +class TestListResult: + def test_list_result_default(self): + result = ListResult(service="api", verbose=False, output="text") + assert result.service == "api" + assert result.key is None + assert result.value is None + assert result.config == {} + assert result.verbose is False + assert result.output == "text" + assert result.success is False + assert result.error is None + + def test_list_result_with_config(self): + result = ListResult( + service="view", config={"KEY1": "value1", "KEY2": "value2"}, verbose=True, output="json", success=True + ) + assert result.service == "view" + assert result.config == {"KEY1": "value1", "KEY2": "value2"} + assert result.verbose is True + assert result.output == "json" + assert result.success is True + + +class TestListService: + def setup_method(self): + self.config = ListConfig() + self.logger = Mock(spec=Logger) + self.environment_service = Mock() + self.service = ListService(self.config, self.logger, self.environment_service) + + def test_list_service_init(self): + assert self.service.config == self.config + assert self.service.logger == self.logger + assert self.service.environment_service == self.environment_service + + def test_list_service_init_defaults(self): + service = ListService(self.config) + assert service.config == self.config + assert service.logger is not None + assert service.environment_service is not None + + def test_create_result_success(self): + result = self.service._create_result(True, config_dict={"KEY1": "value1"}) + + assert result.service == "api" + assert result.config == {"KEY1": "value1"} + assert result.success is True + assert result.error is None + + def test_create_result_failure(self): + result = self.service._create_result(False, error="Test error") + + assert result.service == "api" + assert result.config == {} + assert result.success is False + assert result.error == "Test error" + + def test_list_success(self): + self.environment_service.list_config.return_value = (True, {"KEY1": "value1"}, None) + + result = self.service.list() + + assert result.success is True + assert result.config == {"KEY1": "value1"} + assert result.error is None + + def test_list_failure(self): + self.environment_service.list_config.return_value = (False, {}, "File not found") + + result = self.service.list() + + assert result.success is False + assert result.error == "File not found" + self.logger.error.assert_called_once_with(configuration_list_failed.format(service="api", error="File not found")) + + def test_list_dry_run(self): + self.config.dry_run = True + + result = self.service.list() + + assert result.success is True + assert result.error is 
None + self.environment_service.list_config.assert_not_called() + + def test_list_and_format_success(self): + self.environment_service.list_config.return_value = (True, {"KEY1": "value1"}, None) + + output = self.service.list_and_format() + + assert "KEY1" in output + assert "value1" in output + assert "Key" in output + assert "Value" in output + + def test_list_and_format_failure(self): + self.environment_service.list_config.return_value = (False, {}, "File not found") + + output = self.service.list_and_format() + + assert configuration_list_failed.format(service="api", error="File not found") in output + + def test_list_and_format_dry_run(self): + self.config.dry_run = True + + output = self.service.list_and_format() + + assert dry_run_mode in output + assert dry_run_list_config.format(service="api") in output + assert end_dry_run in output + + def test_format_output_json(self): + result = ListResult(service="api", config={"KEY1": "value1"}, success=True, verbose=False, output="json") + + output = self.service._format_output(result, "json") + data = json.loads(output) + + assert data["success"] is True + assert data["service"] == "api" + assert data["config"] == {"KEY1": "value1"} + + def test_format_output_text_success(self): + result = ListResult( + service="api", config={"KEY1": "value1", "KEY2": "value2"}, success=True, verbose=False, output="text" + ) + + output = self.service._format_output(result, "text") + + assert "KEY1" in output + assert "value1" in output + assert "KEY2" in output + assert "value2" in output + assert "Key" in output + assert "Value" in output + + def test_format_output_text_failure(self): + result = ListResult(service="api", success=False, error="Test error", verbose=False, output="text") + + output = self.service._format_output(result, "text") + + assert configuration_list_failed.format(service="api", error="Test error") in output + + def test_format_output_text_no_config(self): + result = ListResult(service="api", config={}, success=True, verbose=False, output="text") + + output = self.service._format_output(result, "text") + + assert no_configuration_found.format(service="api") in output + + +class TestList: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.action = List(self.logger) + + def test_list_action_init(self): + assert self.action.logger == self.logger + + def test_list_action_init_default(self): + action = List() + assert action.logger is None + + def test_list_success(self): + config = ListConfig(service="api") + + with patch("app.commands.conf.list.ListService") as mock_service_class: + mock_service = Mock() + mock_service.execute.return_value = ListResult( + service="api", config={"KEY1": "value1"}, success=True, verbose=False, output="text" + ) + mock_service_class.return_value = mock_service + + result = self.action.list(config) + + assert result.success is True + assert result.config == {"KEY1": "value1"} + + def test_format_output(self): + result = ListResult(service="api", config={"KEY1": "value1"}, success=True, verbose=False, output="text") + + with patch("app.commands.conf.list.ListService") as mock_service_class: + mock_service = Mock() + mock_service._format_output.return_value = "formatted output" + mock_service_class.return_value = mock_service + + output = self.action.format_output(result, "text") + + assert output == "formatted output" diff --git a/cli/tests/commands/conf/test_set.py b/cli/tests/commands/conf/test_set.py new file mode 100644 index 00000000..8d250b17 --- /dev/null +++ 
b/cli/tests/commands/conf/test_set.py @@ -0,0 +1,305 @@ +import json +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.conf.messages import ( + configuration_set, + configuration_set_failed, + dry_run_mode, + dry_run_set_config, + end_dry_run, + key_required, + value_required, +) +from app.commands.conf.set import EnvironmentManager, Set, SetConfig, SetResult, SetService +from app.utils.logger import Logger + + +class TestEnvironmentManager: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.logger.verbose = False # Add verbose attribute to mock + self.manager = EnvironmentManager(self.logger) + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + @patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") + def test_set_config_success(self, mock_write_env_file, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1"}, None) + mock_write_env_file.return_value = (True, None) + + success, error = self.manager.set_config("api", "KEY2", "value2") + + assert success is True + assert error is None + mock_read_env_file.assert_called_once_with("/etc/nixopus/source/api/.env") + mock_write_env_file.assert_called_once_with("/etc/nixopus/source/api/.env", {"KEY1": "value1", "KEY2": "value2"}) + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + def test_set_config_read_failure(self, mock_read_env_file): + mock_read_env_file.return_value = (False, {}, "File not found") + + success, error = self.manager.set_config("api", "KEY1", "value1") + + assert success is False + assert error == "File not found" + + @patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") + @patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") + def test_set_config_write_failure(self, mock_write_env_file, mock_read_env_file): + mock_read_env_file.return_value = (True, {"KEY1": "value1"}, None) + mock_write_env_file.return_value = (False, "Write error") + + success, error = self.manager.set_config("api", "KEY2", "value2") + + assert success is False + assert error == "Write error" + + @patch("app.commands.conf.base.BaseEnvironmentManager.get_service_env_file") + def test_set_config_with_custom_env_file(self, mock_get_service_env_file): + mock_get_service_env_file.return_value = "/custom/.env" + + with patch("app.commands.conf.base.BaseEnvironmentManager.read_env_file") as mock_read: + with patch("app.commands.conf.base.BaseEnvironmentManager.write_env_file") as mock_write: + mock_read.return_value = (True, {}, None) + mock_write.return_value = (True, None) + + self.manager.set_config("api", "KEY1", "value1", "/custom/.env") + + mock_get_service_env_file.assert_called_once_with("api", "/custom/.env") + + +class TestSetConfig: + def test_valid_config_default(self): + config = SetConfig(key="TEST_KEY", value="test_value") + assert config.service == "api" + assert config.key == "TEST_KEY" + assert config.value == "test_value" + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.env_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = SetConfig( + service="view", + key="TEST_KEY", + value="test_value", + verbose=True, + output="json", + dry_run=True, + env_file="/path/to/.env", + ) + assert config.service == "view" + assert config.key == "TEST_KEY" + assert config.value == "test_value" + assert 
config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.env_file == "/path/to/.env" + + +class TestSetResult: + def test_set_result_default(self): + result = SetResult(service="api", key="TEST_KEY", value="test_value", verbose=False, output="text") + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.value == "test_value" + assert result.config == {} + assert result.verbose is False + assert result.output == "text" + assert result.success is False + assert result.error is None + + def test_set_result_success(self): + result = SetResult( + service="view", + key="TEST_KEY", + value="test_value", + config={"KEY1": "value1"}, + verbose=True, + output="json", + success=True, + ) + assert result.service == "view" + assert result.key == "TEST_KEY" + assert result.value == "test_value" + assert result.config == {"KEY1": "value1"} + assert result.verbose is True + assert result.output == "json" + assert result.success is True + + +class TestSetService: + def setup_method(self): + self.config = SetConfig(key="TEST_KEY", value="test_value") + self.logger = Mock(spec=Logger) + self.environment_service = Mock() + self.service = SetService(self.config, self.logger, self.environment_service) + + def test_set_service_init(self): + assert self.service.config == self.config + assert self.service.logger == self.logger + assert self.service.environment_service == self.environment_service + + def test_set_service_init_defaults(self): + service = SetService(self.config) + assert service.config == self.config + assert service.logger is not None + assert service.environment_service is not None + + def test_create_result_success(self): + result = self.service._create_result(True, config_dict={"KEY1": "value1"}) + + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.value == "test_value" + assert result.config == {"KEY1": "value1"} + assert result.success is True + assert result.error is None + + def test_create_result_failure(self): + result = self.service._create_result(False, error="Test error") + + assert result.service == "api" + assert result.key == "TEST_KEY" + assert result.value == "test_value" + assert result.config == {} + assert result.success is False + assert result.error == "Test error" + + def test_set_missing_key(self): + self.config.key = None + + result = self.service.set() + + assert result.success is False + assert result.error == key_required + + def test_set_missing_value(self): + self.config.value = None + + result = self.service.set() + + assert result.success is False + assert result.error == value_required + + def test_set_success(self): + self.environment_service.set_config.return_value = (True, None) + + result = self.service.set() + + assert result.success is True + assert result.error is None + self.environment_service.set_config.assert_called_once_with("api", "TEST_KEY", "test_value", None) + + def test_set_failure(self): + self.environment_service.set_config.return_value = (False, "Write error") + + result = self.service.set() + + assert result.success is False + assert result.error == "Write error" + + def test_set_dry_run(self): + self.config.dry_run = True + + result = self.service.set() + + assert result.success is True + assert result.error is None + self.environment_service.set_config.assert_not_called() + + def test_set_and_format_success(self): + self.environment_service.set_config.return_value = (True, None) + + output = self.service.set_and_format() + + assert 
configuration_set.format(service="api", key="TEST_KEY", value="test_value") in output + + def test_set_and_format_failure(self): + self.environment_service.set_config.return_value = (False, "Write error") + + output = self.service.set_and_format() + + assert configuration_set_failed.format(service="api", error="Write error") in output + + def test_set_and_format_dry_run(self): + self.config.dry_run = True + + output = self.service.set_and_format() + + assert dry_run_mode in output + assert dry_run_set_config.format(service="api", key="TEST_KEY", value="test_value") in output + assert end_dry_run in output + + def test_format_output_json(self): + result = SetResult(service="api", key="TEST_KEY", value="test_value", success=True, verbose=False, output="json") + + output = self.service._format_output(result, "json") + data = json.loads(output) + + assert data["service"] == "api" + assert data["key"] == "TEST_KEY" + assert data["value"] == "test_value" + assert data["success"] is True + + def test_format_output_text_success(self): + result = SetResult(service="api", key="TEST_KEY", value="test_value", success=True, verbose=False, output="text") + + output = self.service._format_output(result, "text") + + assert configuration_set.format(service="api", key="TEST_KEY", value="test_value") in output + + def test_format_output_text_failure(self): + result = SetResult( + service="api", key="TEST_KEY", value="test_value", success=False, error="Test error", verbose=False, output="text" + ) + + output = self.service._format_output(result, "text") + + assert configuration_set_failed.format(service="api", error="Test error") in output + + +class TestSet: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.action = Set(self.logger) + + def test_set_action_init(self): + assert self.action.logger == self.logger + + def test_set_action_init_default(self): + action = Set() + assert action.logger is None + + def test_set_success(self): + config = SetConfig(key="TEST_KEY", value="test_value") + + with patch("app.commands.conf.set.SetService") as mock_service_class: + mock_service = Mock() + mock_service.execute.return_value = SetResult( + service="api", key="TEST_KEY", value="test_value", success=True, verbose=False, output="text" + ) + mock_service_class.return_value = mock_service + + result = self.action.set(config) + + assert result.success is True + assert result.key == "TEST_KEY" + assert result.value == "test_value" + + def test_format_output(self): + result = SetResult(service="api", key="TEST_KEY", value="test_value", success=True, verbose=False, output="text") + + with patch("app.commands.conf.set.SetService") as mock_service_class: + mock_service = Mock() + mock_service._format_output.return_value = "formatted output" + mock_service_class.return_value = mock_service + + output = self.action.format_output(result, "text") + + assert output == "formatted output" diff --git a/cli/tests/commands/conflict/__init__.py b/cli/tests/commands/conflict/__init__.py new file mode 100644 index 00000000..908e6eca --- /dev/null +++ b/cli/tests/commands/conflict/__init__.py @@ -0,0 +1,16 @@ +""" +Test package for the conflict command. + +This package contains organized tests for the conflict command functionality, +separated by concerns for better maintainability. 
+""" + +from .test_config_and_models import TestConfigAndModels +from .test_version_checker import TestVersionChecker +from .test_service_integration import TestServiceIntegration + +__all__ = [ + 'TestConfigAndModels', + 'TestVersionChecker', + 'TestServiceIntegration' +] diff --git a/cli/tests/commands/conflict/test_config.yaml b/cli/tests/commands/conflict/test_config.yaml new file mode 100644 index 00000000..e41a69f1 --- /dev/null +++ b/cli/tests/commands/conflict/test_config.yaml @@ -0,0 +1,16 @@ +deps: + docker: + version: ">=20.10.0, <26.0.0" + version-command: ["docker", "--version"] + go: + version: ">=1.18.0, <2.0.0" + version-command: ["go", "version"] + python: + version: ">=3.8, <3.13" + version-command: ["python", "--version"] + ssh: + version: ">=8.0, <10.0" + version-command: ["ssh", "-V"] + test_tool: + version: ">=1.0.0, <2.0.0" + version-command: ["test_tool", "--version"] diff --git a/cli/tests/commands/conflict/test_config_and_models.py b/cli/tests/commands/conflict/test_config_and_models.py new file mode 100644 index 00000000..b7fc85cb --- /dev/null +++ b/cli/tests/commands/conflict/test_config_and_models.py @@ -0,0 +1,99 @@ +import unittest +import yaml +import tempfile +import os +from app.commands.conflict.models import ( + ConflictConfig, + ConflictCheckResult, +) +from app.commands.conflict.conflict import ( + ConflictChecker, +) +from app.utils.logger import Logger + + +class TestConfigAndModels(unittest.TestCase): + """Test configuration loading and data models""" + + def setUp(self): + self.logger = Logger(verbose=False) + self.config = ConflictConfig(config_file="test_config.yaml", verbose=False, output="text") + + def test_conflict_check_result_creation(self): + """Test ConflictCheckResult model creation""" + result = ConflictCheckResult(tool="docker", expected="20.10.0", current="20.10.5", status="compatible", conflict=False) + + self.assertEqual(result.tool, "docker") + self.assertEqual(result.expected, "20.10.0") + self.assertEqual(result.current, "20.10.5") + self.assertFalse(result.conflict) + + def test_conflict_checker_config_loading(self): + """Test ConflictChecker config loading with valid YAML config""" + config_data = {"deps": {"docker": {"version": "20.10.0"}, "go": {"version": "1.18.0"}, "python": {"version": "3.9.0"}}} + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + conflict_config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + # Create ConflictChecker which will load the config internally + checker = ConflictChecker(conflict_config, self.logger) + + # Test that the config was loaded correctly by checking internal state + # We can verify this by calling _load_user_config directly + result = checker._load_user_config(temp_path) + + self.assertEqual(result, config_data) + self.assertIn("deps", result) + self.assertIn("docker", result["deps"]) + self.assertEqual(result["deps"]["docker"]["version"], "20.10.0") + finally: + os.unlink(temp_path) + + def test_config_loading_missing_file(self): + """Test ConflictChecker config loading with missing file""" + conflict_config = ConflictConfig(config_file="nonexistent.yaml", verbose=False, output="text") + + # ConflictChecker initialization should fail with missing config file + with self.assertRaises(FileNotFoundError): + ConflictChecker(conflict_config, self.logger) + + def test_config_loading_invalid_yaml(self): + """Test ConflictChecker config loading with invalid YAML""" + with 
tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write("invalid: yaml: content: [") + temp_path = f.name + + try: + conflict_config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + # ConflictChecker initialization should fail with invalid YAML + with self.assertRaises(Exception): + ConflictChecker(conflict_config, self.logger) + finally: + os.unlink(temp_path) + + def test_empty_deps_handling(self): + """Test handling of empty or missing deps section""" + config_data = {} + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + # Test that config is created successfully even with empty deps + self.assertEqual(config.config_file, temp_path) + self.assertFalse(config.verbose) + self.assertEqual(config.output, "text") + finally: + os.unlink(temp_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/conflict/test_conflict.py b/cli/tests/commands/conflict/test_conflict.py new file mode 100644 index 00000000..7b91c34b --- /dev/null +++ b/cli/tests/commands/conflict/test_conflict.py @@ -0,0 +1,53 @@ +""" +Comprehensive test suite for the conflict command. + +This module serves as the main test runner that imports and runs all +the separated test modules for better organization and separation of concerns. + +The conflict command has been refactored into: +- models.py: Data models and configuration classes +- conflict.py: Core business logic and services +""" + +import unittest +import sys +import os + +# Add the tests directory to the path to import the separated test modules +sys.path.insert(0, os.path.dirname(__file__)) + +# Import all the separated test modules +from test_config_and_models import TestConfigAndModels +from test_version_checker import TestVersionChecker +from test_service_integration import TestServiceIntegration + + +def create_test_suite(): + """Create a comprehensive test suite with all conflict command tests.""" + suite = unittest.TestSuite() + + # Add all test classes + suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestConfigAndModels)) + suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestVersionChecker)) + suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestServiceIntegration)) + + return suite + + +class TestConflictCommand(unittest.TestCase): + """Main test class that runs all separated tests.""" + + def test_run_all_conflict_tests(self): + """Run all separated test modules and ensure they pass.""" + suite = create_test_suite() + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + # Ensure all tests passed + self.assertEqual(result.errors, []) + self.assertEqual(result.failures, []) + self.assertTrue(result.wasSuccessful()) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/conflict/test_service_integration.py b/cli/tests/commands/conflict/test_service_integration.py new file mode 100644 index 00000000..3dced63c --- /dev/null +++ b/cli/tests/commands/conflict/test_service_integration.py @@ -0,0 +1,152 @@ +import unittest +from unittest.mock import patch +import yaml +import tempfile +import os +from app.commands.conflict.models import ( + ConflictConfig, + ConflictCheckResult, +) +from app.commands.conflict.conflict import ( + ConflictService, +) +from app.utils.logger import Logger + + +class 
TestServiceIntegration(unittest.TestCase): + """Test service integration and formatting""" + + def setUp(self): + self.logger = Logger(verbose=False) + self.config = ConflictConfig( + config_file="test_config.yaml", verbose=False, output="text" + ) + + def test_conflict_service_integration(self): + """Test ConflictService integration with YAML config""" + config_data = {"deps": {"docker": {"version": "20.10.0"}, "go": {"version": "1.18.0"}, "python": {"version": "3.9.0"}}} + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + service = ConflictService(config, self.logger) + + # This would normally make real system calls + # In a real test, we'd mock all the checkers + with patch.object(service.checker, "check_conflicts") as mock_check: + mock_check.return_value = [ + ConflictCheckResult( + tool="docker", expected="20.10.0", current="20.10.5", status="compatible", conflict=False + ) + ] + + results = service.check_conflicts() + self.assertEqual(len(results), 1) + self.assertFalse(results[0].conflict) + finally: + os.unlink(temp_path) + + def test_empty_deps_service_handling(self): + """Test ConflictService handling of empty or missing deps section""" + config_data = {} + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + service = ConflictService(config, self.logger) + results = service.check_conflicts() + + # Should return empty results for empty deps + self.assertEqual(len(results), 0) + finally: + os.unlink(temp_path) + + def test_conflict_formatter_output(self): + """Test ConflictFormatter output formatting""" + from app.commands.conflict.conflict import ConflictFormatter + + formatter = ConflictFormatter() + + results = [ + ConflictCheckResult(tool="docker", expected="20.10.0", current="20.10.5", status="compatible", conflict=False), + ConflictCheckResult(tool="python", expected="3.9.0", current="3.8.0", status="conflict", conflict=True), + ] + + output = formatter.format_output(results, "text") + + # Should contain both tools + self.assertIn("docker", output) + self.assertIn("python", output) + + # Should indicate status + self.assertIn("compatible", output) + + def test_conflict_formatter_json_output(self): + """Test ConflictFormatter JSON output formatting""" + from app.commands.conflict.conflict import ConflictFormatter + + formatter = ConflictFormatter() + + results = [ + ConflictCheckResult(tool="docker", expected="20.10.0", current="20.10.5", status="compatible", conflict=False) + ] + + output = formatter.format_output(results, "json") + + # Should be valid JSON structure + self.assertIn("docker", output) + self.assertIn("compatible", output) + self.assertIn("20.10.5", output) + + def test_service_with_multiple_tools(self): + """Test ConflictService with multiple tool configurations""" + config_data = { + "deps": { + "docker": {"version": "20.10.0"}, + "go": {"version": "1.18.0"}, + "python": {"version": "3.9.0"}, + "nodejs": {"version": "16.0.0"} + } + } + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + service = ConflictService(config, self.logger) + + # Mock the checker 
to return mixed results + with patch.object(service.checker, "check_conflicts") as mock_check: + mock_check.return_value = [ + ConflictCheckResult(tool="docker", expected="20.10.0", current="20.10.5", status="compatible", conflict=False), + ConflictCheckResult(tool="go", expected="1.18.0", current="1.17.0", status="conflict", conflict=True), + ConflictCheckResult(tool="python", expected="3.9.0", current="3.9.2", status="compatible", conflict=False), + ConflictCheckResult(tool="nodejs", expected="16.0.0", current=None, status="missing", conflict=True), + ] + + results = service.check_conflicts() + self.assertEqual(len(results), 4) + + # Check that we have both compatible and conflict results + compatible_results = [r for r in results if not r.conflict] + conflict_results = [r for r in results if r.conflict] + + self.assertEqual(len(compatible_results), 2) + self.assertEqual(len(conflict_results), 2) + finally: + os.unlink(temp_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/conflict/test_version_checker.py b/cli/tests/commands/conflict/test_version_checker.py new file mode 100644 index 00000000..5ec31a28 --- /dev/null +++ b/cli/tests/commands/conflict/test_version_checker.py @@ -0,0 +1,159 @@ +import unittest +from unittest.mock import Mock, patch, call +import subprocess +from app.commands.conflict.models import ConflictConfig +from app.commands.conflict.conflict import ( + ToolVersionChecker, + ConflictChecker, +) +from app.utils.logger import Logger + + +class TestVersionChecker(unittest.TestCase): + """Test version checking and comparison logic""" + + def setUp(self): + self.logger = Logger(verbose=False) + self.config = ConflictConfig( + config_file="test_config.yaml", verbose=False, output="text" + ) + + @patch("subprocess.run") + def test_tool_version_checker_successful(self, mock_run): + """Test ToolVersionChecker with successful version check""" + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "Docker version 20.10.5, build 55c4c88" + mock_run.return_value = mock_result + + checker = ToolVersionChecker(self.logger, timeout=5) + version = checker.get_tool_version("docker") + + self.assertEqual(version, "20.10.5") + mock_run.assert_called_once_with(["docker", "--version"], capture_output=True, text=True, timeout=5) + + @patch("subprocess.run") + def test_tool_version_checker_not_found(self, mock_run): + """Test ToolVersionChecker with tool not found""" + mock_result = Mock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_run.return_value = mock_result + + checker = ToolVersionChecker(self.logger, timeout=5) + version = checker.get_tool_version("nonexistent") + + self.assertIsNone(version) + + @patch("subprocess.run") + def test_tool_version_checker_timeout(self, mock_run): + """Test ToolVersionChecker with timeout""" + mock_run.side_effect = subprocess.TimeoutExpired("cmd", 5) + + checker = ToolVersionChecker(self.logger, timeout=5) + version = checker.get_tool_version("slow_tool") + + self.assertIsNone(version) + + @patch("app.commands.conflict.conflict.ConflictChecker._load_user_config") + def test_tool_mapping(self, mock_load_config): + """Test tool name mapping for system commands""" + # Provide a dummy config for ConflictChecker + mock_load_config.return_value = {"deps": {"docker": {"version": "20.10.0"}, "go": {"version": "1.18.0"}, "python": {"version": "3.9.0"}}} + deps = {"docker": {"version": "20.10.0"}, "go": {"version": "1.18.0"}, "python": {"version": "3.9.0"}} + + conflict_checker = 
ConflictChecker(self.config, self.logger) + + # Mock the version checker to simulate tool responses + with patch.object(conflict_checker.version_checker, "get_tool_version") as mock_get_version: + mock_get_version.return_value = "20.10.5" + + results = conflict_checker._check_version_conflicts(deps) + + # Should have called get_tool_version for each tool + self.assertEqual(mock_get_version.call_count, 3) + # Check that we got results for all tools + self.assertEqual(len(results), 3) + # Check that the results have the expected structure + for result in results: + self.assertIn(result.tool, ["docker", "go", "python"]) + self.assertIsNotNone(result.current) + self.assertIsNotNone(result.expected) + self.assertIsInstance(result.conflict, bool) + + def test_version_requirement_none_or_empty(self): + """Test handling of tools with no version requirements""" + import yaml + import tempfile + import os + + config_data = {"deps": {"docker": {"version": ""}, "git": {"version": None}, "python": {}}} # No version key + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(config_data, f) + temp_path = f.name + + try: + config = ConflictConfig(config_file=temp_path, verbose=False, output="text") + + checker = ConflictChecker(config, self.logger) + + # Mock version checker to return versions + with patch.object(checker.version_checker, "get_tool_version") as mock_get_version: + mock_get_version.return_value = "1.0.0" + + results = checker._check_version_conflicts(config_data["deps"]) + + # Only docker and git should be checked (they have version keys) + # python should not be checked (no version key) + self.assertEqual(len(results), 2) + + # All should be compatible (no version requirement) + for result in results: + self.assertFalse(result.conflict) + self.assertEqual(result.expected, "present") + finally: + os.unlink(temp_path) + + def test_tool_version_check_integration(self): + """Test the integration of tool version checking""" + checker = ToolVersionChecker(self.logger, timeout=5) + + # Test that the tool version checking works with mocked subprocess + with patch("subprocess.run") as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "Test version 1.0.0" + mock_run.return_value = mock_result + + version = checker.get_tool_version("test_tool") + + # Should extract version from output + self.assertEqual(version, "1.0.0") + + def test_version_commands_mapping(self): + """Test that different tools use correct version commands""" + deps_config = { + "docker": {"version-command": ["docker", "--version"]}, + "go": {"version-command": ["go", "version"]}, + "ssh": {"version-command": ["ssh", "-V"]}, + } + checker = ToolVersionChecker(self.logger, deps_config, timeout=5) + with patch("subprocess.run") as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "version 1.0.0" + mock_run.return_value = mock_result + # Test Docker uses correct command + checker.get_tool_version("docker") + mock_run.assert_called_with(["docker", "--version"], capture_output=True, text=True, timeout=5) + # Test Go uses correct command + checker.get_tool_version("go") + mock_run.assert_called_with(["go", "version"], capture_output=True, text=True, timeout=5) + # Test SSH uses correct command + checker.get_tool_version("ssh") + mock_run.assert_called_with(["ssh", "-V"], capture_output=True, text=True, timeout=5) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/install/__init__.py 
b/cli/tests/commands/install/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/install/test_ssh.py b/cli/tests/commands/install/test_ssh.py new file mode 100644 index 00000000..5823ca88 --- /dev/null +++ b/cli/tests/commands/install/test_ssh.py @@ -0,0 +1,243 @@ +import os +import tempfile +import unittest +from unittest.mock import MagicMock, Mock, patch + +from app.commands.install.ssh import SSH, SSHCommandBuilder, SSHConfig, SSHKeyManager + + +class TestSSHKeyGeneration(unittest.TestCase): + def setUp(self): + self.mock_logger = Mock() + self.temp_dir = tempfile.mkdtemp() + self.test_key_path = os.path.join(self.temp_dir, "test_key") + + def tearDown(self): + import shutil + + shutil.rmtree(self.temp_dir) + + def test_ssh_command_builder_rsa(self): + cmd = SSHCommandBuilder.build_ssh_keygen_command(self.test_key_path, "rsa", 4096, "testpass") + expected = ["ssh-keygen", "-t", "rsa", "-f", self.test_key_path, "-N", "testpass", "-b", "4096"] + self.assertEqual(cmd, expected) + + def test_ssh_command_builder_ed25519_no_passphrase(self): + cmd = SSHCommandBuilder.build_ssh_keygen_command(self.test_key_path, "ed25519", 256) + expected = ["ssh-keygen", "-t", "ed25519", "-f", self.test_key_path, "-N", ""] + self.assertEqual(cmd, expected) + + def test_ssh_command_builder_ecdsa(self): + cmd = SSHCommandBuilder.build_ssh_keygen_command(self.test_key_path, "ecdsa", 256) + expected = ["ssh-keygen", "-t", "ecdsa", "-f", self.test_key_path, "-N", "", "-b", "256"] + self.assertEqual(cmd, expected) + + def test_ssh_command_builder_dsa(self): + cmd = SSHCommandBuilder.build_ssh_keygen_command(self.test_key_path, "dsa", 1024) + expected = ["ssh-keygen", "-t", "dsa", "-f", self.test_key_path, "-N", "", "-b", "1024"] + self.assertEqual(cmd, expected) + + def test_ssh_config_validation_valid_key_type(self): + config = SSHConfig(path=self.test_key_path, key_type="ed25519", key_size=256) + self.assertEqual(config.key_type, "ed25519") + + def test_ssh_config_validation_invalid_key_type(self): + with self.assertRaises(ValueError): + SSHConfig(path=self.test_key_path, key_type="invalid_type", key_size=256) + + def test_ssh_config_validation_valid_key_size(self): + config = SSHConfig(path=self.test_key_path, key_type="rsa", key_size=4096) + self.assertEqual(config.key_size, 4096) + + def test_ssh_config_validation_invalid_key_size(self): + with self.assertRaises(ValueError): + SSHConfig(path=self.test_key_path, key_type="rsa", key_size=512) + + def test_ssh_config_ed25519_key_size_always_256(self): + config = SSHConfig(path=self.test_key_path, key_type="ed25519", key_size=512) + self.assertEqual(config.key_size, 256) + + @patch("subprocess.run") + def test_ssh_key_manager_availability_check_success(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_run.return_value = mock_result + + manager = SSHKeyManager(self.mock_logger) + available, error = manager._check_ssh_keygen_availability() + + self.assertTrue(available) + self.assertIsNone(error) + mock_run.assert_called_once_with(["ssh-keygen", "-h"], capture_output=True, text=True, check=False) + + @patch("subprocess.run") + def test_ssh_key_manager_availability_check_failure(self, mock_run): + mock_result = Mock() + mock_result.returncode = 1 + mock_run.return_value = mock_result + + manager = SSHKeyManager(self.mock_logger) + available, error = manager._check_ssh_keygen_availability() + + self.assertFalse(available) + self.assertIsNone(error) + + @patch("subprocess.run") + def 
test_ssh_key_manager_version_check(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "OpenSSH_8.9p1" + mock_run.return_value = mock_result + + manager = SSHKeyManager(self.mock_logger) + success, error = manager._check_ssh_keygen_version() + + self.assertTrue(success) + self.assertIsNone(error) + self.mock_logger.debug.assert_called_with("SSH keygen version: OpenSSH_8.9p1") + + @patch("subprocess.run") + def test_ssh_key_manager_success(self, mock_run): + mock_gen_result = Mock() + mock_gen_result.returncode = 0 + + mock_run.return_value = mock_gen_result + + manager = SSHKeyManager(self.mock_logger) + success, error = manager.generate_ssh_key(self.test_key_path, "ed25519", 256) + + self.assertTrue(success) + self.assertIsNone(error) + self.assertEqual(mock_run.call_count, 1) + + @patch("subprocess.run") + def test_ssh_key_manager_failure(self, mock_run): + from subprocess import CalledProcessError + + mock_avail_result = Mock() + mock_avail_result.returncode = 0 + + mock_version_result = Mock() + mock_version_result.returncode = 0 + mock_run.side_effect = CalledProcessError(1, "ssh-keygen", stderr="Permission denied") + + manager = SSHKeyManager(self.mock_logger) + success, error = manager.generate_ssh_key(self.test_key_path, "ed25519", 256) + + self.assertFalse(success) + self.assertEqual(error, "Permission denied") + + @patch("subprocess.run") + def test_ssh_key_manager_availability_failure(self, mock_run): + mock_result = Mock() + mock_result.returncode = 1 + mock_run.return_value = mock_result + + manager = SSHKeyManager(self.mock_logger) + available, error = manager._check_ssh_keygen_availability() + + self.assertFalse(available) + self.assertIsNone(error) + + def test_ssh_service_dry_run(self): + config = SSHConfig(path=self.test_key_path, key_type="ed25519", key_size=256, dry_run=True) + + ssh = SSH(self.mock_logger) + result = ssh.generate(config) + + self.assertTrue(result.success) + self.assertIsNotNone(result.error) + self.assertIn("DRY RUN MODE", result.error) + + @patch("subprocess.run") + def test_ssh_service_force_overwrite(self, mock_run): + from subprocess import CalledProcessError + + with open(self.test_key_path, "w") as f: + f.write("existing key") + + mock_gen_result = Mock() + mock_gen_result.returncode = 0 + + mock_run.return_value = mock_gen_result + + config = SSHConfig(path=self.test_key_path, key_type="ed25519", key_size=256, force=True) + + ssh = SSH(self.mock_logger) + result = ssh.generate(config) + + self.assertFalse(result.success) + self.assertIn("Failed to set permissions", result.error) + + @patch("subprocess.run") + def test_ssh_key_manager_with_permissions(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_run.return_value = mock_result + + manager = SSHKeyManager(self.mock_logger) + + with open(self.test_key_path, "w") as f: + f.write("private key content") + + with open(f"{self.test_key_path}.pub", "w") as f: + f.write("public key content") + + success, error = manager.set_key_permissions(self.test_key_path, f"{self.test_key_path}.pub") + + self.assertTrue(success) + self.assertIsNone(error) + + def test_ssh_key_manager_create_ssh_directory(self): + manager = SSHKeyManager(self.mock_logger) + test_ssh_dir = os.path.join(self.temp_dir, "test_ssh") + + success, error = manager.create_ssh_directory(test_ssh_dir) + + self.assertTrue(success) + self.assertIsNone(error) + self.assertTrue(os.path.exists(test_ssh_dir)) + + @patch("builtins.open", create=True) + def 
test_ssh_key_manager_add_to_authorized_keys(self, mock_open): + manager = SSHKeyManager(self.mock_logger) + + public_key_path = f"{self.test_key_path}.pub" + with open(public_key_path, "w") as f: + f.write("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI... test@example.com") + + success, error = manager.add_to_authorized_keys(public_key_path) + + self.assertTrue(success) + self.assertIsNone(error) + + def test_ssh_config_with_new_options(self): + config = SSHConfig( + path=self.test_key_path, + key_type="ed25519", + key_size=256, + set_permissions=True, + add_to_authorized_keys=True, + create_ssh_directory=True, + ) + + self.assertTrue(config.set_permissions) + self.assertTrue(config.add_to_authorized_keys) + self.assertTrue(config.create_ssh_directory) + + def test_ssh_config_ed25519_key_size_validation(self): + config = SSHConfig(path=self.test_key_path, key_type="ed25519", key_size=512) + self.assertEqual(config.key_size, 256) + + def test_ssh_config_ecdsa_key_size_validation(self): + valid_sizes = [256, 384, 521] + for size in valid_sizes: + config = SSHConfig(path=self.test_key_path, key_type="ecdsa", key_size=size) + self.assertEqual(config.key_size, size) + + with self.assertRaises(ValueError): + SSHConfig(path=self.test_key_path, key_type="ecdsa", key_size=512) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/preflight/__init__.py b/cli/tests/commands/preflight/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/commands/preflight/test_deps.py b/cli/tests/commands/preflight/test_deps.py new file mode 100644 index 00000000..ae4af512 --- /dev/null +++ b/cli/tests/commands/preflight/test_deps.py @@ -0,0 +1,339 @@ +import json +import subprocess +import unittest +from typing import List +from unittest.mock import MagicMock, Mock, patch + +from app.commands.preflight.deps import ( + DependencyChecker, + DependencyFormatter, + DependencyValidator, + Deps, + DepsCheckResult, + DepsConfig, + DepsService, +) +from app.utils.lib import Supported +from app.utils.logger import Logger +from app.utils.protocols import LoggerProtocol + + +class MockLogger: + def __init__(self): + self.debug_calls = [] + self.error_calls = [] + self.info_calls = [] + self.warning_calls = [] + self.success_calls = [] + self.highlight_calls = [] + self.verbose = True + + def debug(self, message: str) -> None: + self.debug_calls.append(message) + + def error(self, message: str) -> None: + self.error_calls.append(message) + + def info(self, message: str) -> None: + self.info_calls.append(message) + + def warning(self, message: str) -> None: + self.warning_calls.append(message) + + def success(self, message: str) -> None: + self.success_calls.append(message) + + def highlight(self, message: str) -> None: + self.highlight_calls.append(message) + + +class TestDependencyChecker(unittest.TestCase): + + def setUp(self): + self.mock_logger = MockLogger() + self.checker = DependencyChecker(logger=self.mock_logger) + + @patch("shutil.which") + def test_check_dependency_available(self, mock_which): + mock_which.return_value = "/usr/bin/docker" + + result = self.checker.check_dependency("docker") + + self.assertTrue(result) + mock_which.assert_called_once_with("docker") + self.assertEqual(len(self.mock_logger.debug_calls), 1) + self.assertIn("docker", self.mock_logger.debug_calls[0]) + + @patch("shutil.which") + def test_check_dependency_not_available(self, mock_which): + mock_which.return_value = None + + result = self.checker.check_dependency("nonexistent") + + 
self.assertFalse(result) + mock_which.assert_called_once_with("nonexistent") + + @patch("shutil.which") + def test_check_dependency_timeout(self, mock_which): + mock_which.side_effect = subprocess.TimeoutExpired("command", 5) + + result = self.checker.check_dependency("slow_command") + + self.assertFalse(result) + self.assertEqual(len(self.mock_logger.error_calls), 1) + self.assertIn("slow_command", self.mock_logger.error_calls[0]) + + @patch("shutil.which") + def test_check_dependency_exception(self, mock_which): + mock_which.side_effect = Exception("Test exception") + + result = self.checker.check_dependency("failing_command") + + self.assertFalse(result) + self.assertEqual(len(self.mock_logger.error_calls), 1) + self.assertIn("failing_command", self.mock_logger.error_calls[0]) + + +class TestDependencyValidator(unittest.TestCase): + + def setUp(self): + self.validator = DependencyValidator() + + def test_validate_os_valid(self): + result = self.validator.validate_os("linux") + self.assertEqual(result, "linux") + + result = self.validator.validate_os("darwin") + self.assertEqual(result, "darwin") + + def test_validate_os_invalid(self): + with self.assertRaises(ValueError) as context: + self.validator.validate_os("windows") + + self.assertIn("windows", str(context.exception)) + + def test_validate_package_manager_valid(self): + valid_managers = ["apt", "yum", "dnf", "pacman", "apk", "brew"] + for manager in valid_managers: + result = self.validator.validate_package_manager(manager) + self.assertEqual(result, manager) + + def test_validate_package_manager_invalid(self): + with self.assertRaises(ValueError) as context: + self.validator.validate_package_manager("invalid_manager") + + self.assertIn("invalid_manager", str(context.exception)) + + +class TestDependencyFormatter(unittest.TestCase): + + def setUp(self): + self.formatter = DependencyFormatter() + self.sample_results = [ + DepsCheckResult( + dependency="docker", + verbose=False, + output="text", + os="linux", + package_manager="apt", + is_available=True, + ), + DepsCheckResult( + dependency="kubectl", + verbose=False, + output="text", + os="linux", + package_manager="apt", + is_available=False, + ), + ] + + def test_format_output_text(self): + result = self.formatter.format_output(self.sample_results, "text") + self.assertIn("docker", result) + self.assertIn("kubectl", result) + self.assertIn("available", result) + self.assertIn("not available", result) + + def test_format_output_json(self): + result = self.formatter.format_output(self.sample_results, "json") + parsed = json.loads(result) + self.assertEqual(len(parsed), 2) + self.assertTrue(parsed[0]["is_available"]) + self.assertFalse(parsed[1]["is_available"]) + + def test_format_output_invalid(self): + pass + + +class TestDepsCheckResult(unittest.TestCase): + + def test_deps_check_result_creation(self): + result = DepsCheckResult( + dependency="docker", + verbose=True, + output="json", + os="linux", + package_manager="apt", + is_available=True, + error=None, + ) + + self.assertEqual(result.dependency, "docker") + self.assertTrue(result.verbose) + self.assertEqual(result.output, "json") + self.assertEqual(result.os, "linux") + self.assertEqual(result.package_manager, "apt") + self.assertTrue(result.is_available) + self.assertIsNone(result.error) + + def test_deps_check_result_with_error(self): + result = DepsCheckResult( + dependency="failing_dep", + timeout=5, + verbose=False, + output="text", + os="darwin", + package_manager="brew", + is_available=False, + error="Command not 
found", + ) + + self.assertFalse(result.is_available) + self.assertEqual(result.error, "Command not found") + + +class TestDepsConfig(unittest.TestCase): + + def test_valid_config(self): + config = DepsConfig( + deps=["docker", "kubectl"], verbose=True, output="json", os="linux", package_manager="apt" + ) + + self.assertEqual(config.deps, ["docker", "kubectl"]) + self.assertTrue(config.verbose) + self.assertEqual(config.output, "json") + self.assertEqual(config.os, "linux") + self.assertEqual(config.package_manager, "apt") + + def test_config_validation_os(self): + with self.assertRaises(ValueError): + DepsConfig(deps=["docker"], os="invalid_os", package_manager="apt") + + def test_config_validation_package_manager(self): + with self.assertRaises(ValueError): + DepsConfig(deps=["docker"], os="linux", package_manager="invalid_manager") + + def test_config_timeout_validation(self): + pass + + def test_config_deps_validation(self): + with self.assertRaises(ValueError): + DepsConfig(deps=[], os="linux", package_manager="apt") + + +class TestDepsService(unittest.TestCase): + + def setUp(self): + self.config = DepsConfig( + deps=["docker", "kubectl"], verbose=False, output="text", os="linux", package_manager="apt" + ) + self.mock_logger = MockLogger() + self.mock_checker = Mock() + self.service = DepsService(config=self.config, logger=self.mock_logger, checker=self.mock_checker) + + def test_create_result(self): + result = self.service._create_result("docker", True) + + self.assertEqual(result.dependency, "docker") + self.assertFalse(result.verbose) + self.assertEqual(result.output, "text") + self.assertEqual(result.os, "linux") + self.assertEqual(result.package_manager, "apt") + self.assertTrue(result.is_available) + self.assertIsNone(result.error) + + def test_create_result_with_error(self): + result = self.service._create_result("failing_dep", False, "Command not found") + + self.assertFalse(result.is_available) + self.assertEqual(result.error, "Command not found") + + def test_check_single_dependency_success(self): + self.mock_checker.check_dependency.return_value = True + + result = self.service._check_dependency("docker") + + self.assertTrue(result.is_available) + self.mock_checker.check_dependency.assert_called_once_with("docker") + + def test_check_single_dependency_failure(self): + self.mock_checker.check_dependency.return_value = False + + result = self.service._check_dependency("nonexistent") + + self.assertFalse(result.is_available) + self.mock_checker.check_dependency.assert_called_once_with("nonexistent") + + def test_check_single_dependency_exception(self): + self.mock_checker.check_dependency.side_effect = Exception("Test error") + + result = self.service._check_dependency("failing_dep") + + self.assertFalse(result.is_available) + self.assertEqual(result.error, "Test error") + + @patch("app.commands.preflight.deps.ParallelProcessor") + def test_check_dependencies(self, mock_parallel_processor): + mock_results = [self.service._create_result("docker", True), self.service._create_result("kubectl", False)] + mock_parallel_processor.process_items.return_value = mock_results + + results = self.service.check_dependencies() + + self.assertEqual(len(results), 2) + mock_parallel_processor.process_items.assert_called_once() + + def test_check_and_format(self): + mock_results = [self.service._create_result("docker", True), self.service._create_result("kubectl", False)] + + with patch.object(self.service, "check_dependencies", return_value=mock_results): + result = 
self.service.check_and_format() + + self.assertIn("docker", result) + self.assertIn("kubectl", result) + self.assertIn("available", result) + self.assertIn("not available", result) + + +class TestDeps(unittest.TestCase): + + def setUp(self): + self.mock_logger = MockLogger() + self.deps = Deps(logger=self.mock_logger) + + def test_check(self): + config = DepsConfig(deps=["docker"], os="linux", package_manager="apt") + + with patch("app.commands.preflight.deps.DepsService") as mock_service_class: + mock_service = Mock() + mock_results = [Mock()] + mock_service.check_dependencies.return_value = mock_results + mock_service_class.return_value = mock_service + + results = self.deps.check(config) + + self.assertEqual(results, mock_results) + mock_service_class.assert_called_once_with(config, logger=self.mock_logger) + + def test_format_output(self): + mock_results = [Mock()] + + with patch.object(self.deps.formatter, "format_output", return_value="formatted") as mock_format: + result = self.deps.format_output(mock_results, "text") + + self.assertEqual(result, "formatted") + mock_format.assert_called_once_with(mock_results, "text") + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/commands/preflight/test_port.py b/cli/tests/commands/preflight/test_port.py new file mode 100644 index 00000000..32e9f92c --- /dev/null +++ b/cli/tests/commands/preflight/test_port.py @@ -0,0 +1,108 @@ +from typing import List + +import pytest + +from app.commands.preflight.port import PortCheckResult, PortConfig, PortService + + +class TestPort: + def test_valid_ports(self): + ports = [80, 443, 8080] + config = PortConfig(ports=ports) + assert config.ports == [80, 443, 8080] + + def test_empty_ports_list(self): + with pytest.raises(ValueError): + PortConfig(ports=[]) + + def test_valid_host_localhost(self): + config = PortConfig(ports=[80], host="localhost") + assert config.host == "localhost" + + def test_valid_host_ipv4(self): + config = PortConfig(ports=[80], host="192.168.1.1") + assert config.host == "192.168.1.1" + + def test_valid_host_ipv4_loopback(self): + config = PortConfig(ports=[80], host="127.0.0.1") + assert config.host == "127.0.0.1" + + def test_valid_host_domain(self): + config = PortConfig(ports=[80], host="example.com") + assert config.host == "example.com" + + def test_valid_host_subdomain(self): + config = PortConfig(ports=[80], host="api.example.com") + assert config.host == "api.example.com" + + def test_valid_host_domain_with_hyphens(self): + config = PortConfig(ports=[80], host="my-domain.com") + assert config.host == "my-domain.com" + + def test_invalid_host_invalid_ip(self): + with pytest.raises(ValueError, match="Host must be 'localhost', a valid IP address, or a valid domain name"): + PortConfig(ports=[80], host="256.256.256.256") + + def test_invalid_host_empty(self): + with pytest.raises(ValueError): + PortConfig(ports=[80], host="") + + def test_invalid_host_invalid_domain(self): + with pytest.raises(ValueError, match="Host must be 'localhost', a valid IP address, or a valid domain name"): + PortConfig(ports=[80], host="invalid..domain") + + def test_check_ports_basic(self): + config = PortConfig(ports=[80, 443], host="localhost", timeout=1, verbose=False) + port_service = PortService(config) + results = port_service.check_ports() + assert len(results) == 2 + assert all("port" in result for result in results) + assert all("status" in result for result in results) + assert all("host" in result for result in results) + assert all(result["error"] is None for result 
in results)
+        assert all(result["is_available"] is True for result in results)
+
+    def test_check_ports_verbose(self):
+        config = PortConfig(ports=[80, 443], host="localhost", timeout=1, verbose=True)
+        port_service = PortService(config)
+        results = port_service.check_ports()
+        assert len(results) == 2
+        assert all("port" in result for result in results)
+        assert all("status" in result for result in results)
+        assert all("host" in result for result in results)
+        hosts = [result["host"] for result in results]
+        assert all(host in ("localhost", None) for host in hosts)
+        assert all(result["error"] is None for result in results)
+        assert all(result["is_available"] is True for result in results)
+
+
+def test_port_check_result_type():
+    """Test that PortCheckResult has correct structure"""
+    result: PortCheckResult = {"port": 8080, "status": "available", "host": "localhost", "error": None, "is_available": True}
+
+    assert isinstance(result["port"], int)
+    assert isinstance(result["status"], str)
+    assert isinstance(result["host"], str) or result["host"] is None
+    assert isinstance(result["error"], str) or result["error"] is None
+    assert isinstance(result["is_available"], bool)
+
+
+def test_check_ports_return_type():
+    """Test that check_ports returns correct type"""
+    config = PortConfig(ports=[8080, 3000], host="localhost", timeout=1, verbose=False)
+    port_service = PortService(config)
+    results: List[PortCheckResult] = port_service.check_ports()
+
+    assert isinstance(results, list)
+    for result in results:
+        assert isinstance(result, dict)
+        assert "port" in result
+        assert "status" in result
+        assert "host" in result
+        assert "error" in result
+        assert "is_available" in result
+        assert isinstance(result["port"], int)
+        assert isinstance(result["status"], str)
+        assert isinstance(result["host"], str) or result["host"] is None
+        assert isinstance(result["error"], str) or result["error"] is None
+        assert isinstance(result["is_available"], bool)
diff --git a/cli/tests/commands/proxy/__init__.py b/cli/tests/commands/proxy/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cli/tests/commands/proxy/test_load.py b/cli/tests/commands/proxy/test_load.py
new file mode 100644
index 00000000..5c28d937
--- /dev/null
+++ b/cli/tests/commands/proxy/test_load.py
@@ -0,0 +1,32 @@
+from unittest.mock import patch
+
+import pytest
+from typer.testing import CliRunner
+
+from app.commands.proxy.command import proxy_app
+
+runner = CliRunner()
+
+
+def test_load_success(tmp_path):
+    config_file = tmp_path / "caddy.json"
+    config_file.write_text("{}")
+    with patch("app.commands.proxy.load.CaddyService.load_config_file", return_value=(True, "ok")):
+        result = runner.invoke(proxy_app, ["load", "--config-file", str(config_file)])
+    assert result.exit_code == 0
+    assert "successfully" in result.output
+
+
+def test_load_missing_config():
+    result = runner.invoke(proxy_app, ["load"])
+    assert result.exit_code != 0
+    assert "Configuration file is required" in result.output
+
+
+def test_load_error(tmp_path):
+    config_file = tmp_path / "caddy.json"
+    config_file.write_text("{}")
+    with patch("app.commands.proxy.load.CaddyService.load_config_file", return_value=(False, "fail")):
+        result = runner.invoke(proxy_app, ["load", "--config-file", str(config_file)])
+    assert result.exit_code != 0
+    assert "fail" in result.output
diff --git a/cli/tests/commands/proxy/test_status.py b/cli/tests/commands/proxy/test_status.py
new file mode 100644
index 00000000..8ea54456
--- /dev/null
+++
b/cli/tests/commands/proxy/test_status.py @@ -0,0 +1,22 @@ +from unittest.mock import patch + +import pytest +from typer.testing import CliRunner + +from app.commands.proxy.command import proxy_app + +runner = CliRunner() + + +def test_status_running(): + with patch("app.commands.proxy.status.CaddyService.get_status", return_value=(True, "Caddy is running")): + result = runner.invoke(proxy_app, ["status"]) + assert result.exit_code == 0 + assert "running" in result.output + + +def test_status_not_running(): + with patch("app.commands.proxy.status.CaddyService.get_status", return_value=(False, "not running")): + result = runner.invoke(proxy_app, ["status"]) + assert result.exit_code != 0 + assert "not running" in result.output diff --git a/cli/tests/commands/proxy/test_stop.py b/cli/tests/commands/proxy/test_stop.py new file mode 100644 index 00000000..b9547dbd --- /dev/null +++ b/cli/tests/commands/proxy/test_stop.py @@ -0,0 +1,22 @@ +from unittest.mock import patch + +import pytest +from typer.testing import CliRunner + +from app.commands.proxy.command import proxy_app + +runner = CliRunner() + + +def test_stop_success(): + with patch("app.commands.proxy.stop.CaddyService.stop_caddy", return_value=(True, "Caddy stopped successfully")): + result = runner.invoke(proxy_app, ["stop"]) + assert result.exit_code == 0 + assert "stopped successfully" in result.output + + +def test_stop_error(): + with patch("app.commands.proxy.stop.CaddyService.stop_caddy", return_value=(False, "fail")): + result = runner.invoke(proxy_app, ["stop"]) + assert result.exit_code != 0 + assert "fail" in result.output diff --git a/cli/tests/commands/service/__init__.py b/cli/tests/commands/service/__init__.py new file mode 100644 index 00000000..d4839a6b --- /dev/null +++ b/cli/tests/commands/service/__init__.py @@ -0,0 +1 @@ +# Tests package diff --git a/cli/tests/commands/service/test_base.py b/cli/tests/commands/service/test_base.py new file mode 100644 index 00000000..232e0626 --- /dev/null +++ b/cli/tests/commands/service/test_base.py @@ -0,0 +1,286 @@ +import os +import subprocess +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.service.base import ( + BaseAction, + BaseConfig, + BaseDockerCommandBuilder, + BaseDockerService, + BaseFormatter, + BaseResult, + BaseService, +) +from app.commands.service.up import UpConfig +from app.utils.logger import Logger + + +class TestBaseDockerCommandBuilder: + def test_build_command_up_default(self): + cmd = BaseDockerCommandBuilder.build_command("up", "all", None, None, detach=True) + assert cmd == ["docker", "compose", "up", "-d"] + + def test_build_command_up_with_service(self): + cmd = BaseDockerCommandBuilder.build_command("up", "web", None, None, detach=True) + assert cmd == ["docker", "compose", "up", "-d", "web"] + + def test_build_command_up_without_detach(self): + cmd = BaseDockerCommandBuilder.build_command("up", "all", None, None, detach=False) + assert cmd == ["docker", "compose", "up"] + + def test_build_command_down_default(self): + cmd = BaseDockerCommandBuilder.build_command("down", "all", None, None) + assert cmd == ["docker", "compose", "down"] + + def test_build_command_down_with_service(self): + cmd = BaseDockerCommandBuilder.build_command("down", "web", None, None) + assert cmd == ["docker", "compose", "down", "web"] + + def test_build_command_with_env_file(self): + cmd = BaseDockerCommandBuilder.build_command("up", "all", "/path/to/.env", None, detach=True) + assert cmd == ["docker", 
"compose", "up", "-d", "--env-file", "/path/to/.env"] + + def test_build_command_with_compose_file(self): + cmd = BaseDockerCommandBuilder.build_command("up", "all", None, "/path/to/docker-compose.yml", detach=True) + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "up", "-d"] + + def test_build_command_with_all_parameters(self): + cmd = BaseDockerCommandBuilder.build_command("up", "web", "/path/to/.env", "/path/to/docker-compose.yml", detach=False) + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "up", "--env-file", "/path/to/.env", "web"] + + +class TestBaseFormatter: + def setup_method(self): + self.formatter = BaseFormatter() + + def test_format_output_success(self): + result = BaseResult(name="web", env_file=None, verbose=False, output="text", success=True) + formatted = self.formatter.format_output(result, "text", "Services started: {services}", "Service failed: {error}") + assert formatted == "" + + def test_format_output_failure(self): + result = BaseResult(name="web", env_file=None, verbose=False, output="text", success=False, error="Service not found") + formatted = self.formatter.format_output(result, "text", "Services started: {services}", "Service failed: {error}") + assert "Service not found" in formatted + + def test_format_output_json(self): + result = BaseResult(name="web", env_file=None, verbose=False, output="json", success=True) + formatted = self.formatter.format_output(result, "json", "Services started: {services}", "Service failed: {error}") + import json + + data = json.loads(formatted) + assert data["success"] is True + assert "Services started: web" in data["message"] + + def test_format_dry_run(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig(name="web", env_file="/path/to/.env", dry_run=True, detach=True) + + class MockCommandBuilder: + def build_up_command(self, name, detach, env_file, compose_file): + return ["docker", "compose", "up", "-d", "web"] + + dry_run_messages = { + "mode": "=== DRY RUN MODE ===", + "command_would_be_executed": "The following commands would be executed:", + "command": "Command:", + "service": "Service:", + "env_file": "Environment file:", + "detach_mode": "Detach mode:", + "end": "=== END DRY RUN ===", + } + + formatted = self.formatter.format_dry_run(config, MockCommandBuilder(), dry_run_messages) + assert "=== DRY RUN MODE ===" in formatted + assert "Command:" in formatted + assert "Service: web" in formatted + assert "Environment file: /path/to/.env" in formatted + assert "Detach mode: True" in formatted + + +class TestBaseDockerService: + def setup_method(self): + self.logger = Mock(spec=Logger) + + @patch("subprocess.Popen") + def test_execute_services_success(self, mock_popen): + mock_process = Mock() + mock_process.stdout = ["line1\n", "line2\n"] + mock_process.wait.return_value = 0 + mock_popen.return_value = mock_process + + docker_service = BaseDockerService(self.logger, "up") + + success, error = docker_service.execute_services("web") + + assert success is True + assert error == "line1\nline2" + + @patch("subprocess.run") + def test_execute_services_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "docker compose", stderr="Service not found") + docker_service = BaseDockerService(self.logger, "down") + + success, error = docker_service.execute_services("web") + + assert success is False + assert error == "Service not found" + self.logger.error.assert_called_once_with("Service down 
failed: Service not found") + + @patch("subprocess.Popen") + def test_execute_services_unexpected_error(self, mock_popen): + mock_popen.side_effect = Exception("Unexpected error") + docker_service = BaseDockerService(self.logger, "up") + + success, error = docker_service.execute_services("web") + + assert success is False + assert error == "Unexpected error" + + +class TestBaseConfig: + def test_valid_config_default(self): + config = BaseConfig() + assert config.name == "all" + assert config.env_file is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.compose_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = BaseConfig( + name="web", + env_file="/path/to/.env", + verbose=True, + output="json", + dry_run=True, + compose_file="/path/to/docker-compose.yml", + ) + assert config.name == "web" + assert config.env_file == "/path/to/.env" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = BaseConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + BaseConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = BaseConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = BaseConfig(env_file="") + assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = BaseConfig(env_file=" ") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = BaseConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_compose_file_exists(self, mock_exists): + mock_exists.return_value = True + config = BaseConfig(compose_file="/path/to/docker-compose.yml") + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_compose_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + BaseConfig(compose_file="/path/to/docker-compose.yml") + + def test_validate_compose_file_none(self): + config = BaseConfig(compose_file=None) + assert config.compose_file is None + + def test_validate_compose_file_empty(self): + config = BaseConfig(compose_file="") + assert config.compose_file is None + + def test_validate_compose_file_whitespace(self): + config = BaseConfig(compose_file=" ") + assert config.compose_file is None + + def test_validate_compose_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = BaseConfig(compose_file=" /path/to/docker-compose.yml ") + assert config.compose_file == "/path/to/docker-compose.yml" + + +class TestBaseResult: + def test_base_result_creation(self): + result = BaseResult(name="web", env_file="/path/to/.env", verbose=True, output="json", success=True, error=None) + + assert result.name == "web" + assert result.env_file == "/path/to/.env" + assert result.verbose is True + 
assert result.output == "json" + assert result.success is True + assert result.error is None + + def test_base_result_default_success(self): + result = BaseResult(name="web", env_file=None, verbose=False, output="text") + + assert result.name == "web" + assert result.success is False + assert result.error is None + + +class TestBaseService: + def setup_method(self): + self.config = BaseConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + self.logger = Mock(spec=Logger) + self.docker_service = Mock() + self.service = BaseService(self.config, self.logger, self.docker_service) + + def test_create_result_not_implemented(self): + with pytest.raises(NotImplementedError): + self.service._create_result(True) + + def test_execute_not_implemented(self): + with pytest.raises(NotImplementedError): + self.service.execute() + + def test_execute_and_format_not_implemented(self): + with pytest.raises(NotImplementedError): + self.service.execute_and_format() + + +class TestBaseAction: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.action = BaseAction(self.logger) + + def test_execute_not_implemented(self): + config = BaseConfig(name="web") + with pytest.raises(NotImplementedError): + self.action.execute(config) + + def test_format_output_not_implemented(self): + result = BaseResult(name="web", env_file=None, verbose=False, output="text") + with pytest.raises(NotImplementedError): + self.action.format_output(result, "text") diff --git a/cli/tests/commands/service/test_down.py b/cli/tests/commands/service/test_down.py new file mode 100644 index 00000000..f563451d --- /dev/null +++ b/cli/tests/commands/service/test_down.py @@ -0,0 +1,365 @@ +import subprocess +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.service.down import ( + DockerCommandBuilder, + DockerService, + Down, + DownConfig, + DownFormatter, + DownResult, + DownService, +) +from app.commands.service.messages import ( + dry_run_command, + dry_run_env_file, + dry_run_mode, + dry_run_service, + services_stopped_successfully, +) +from app.utils.logger import Logger + + +class TestDockerCommandBuilder: + def test_build_down_command_default(self): + cmd = DockerCommandBuilder.build_down_command() + assert cmd == ["docker", "compose", "down"] + + def test_build_down_command_with_service_name(self): + cmd = DockerCommandBuilder.build_down_command("web") + assert cmd == ["docker", "compose", "down", "web"] + + def test_build_down_command_with_env_file(self): + cmd = DockerCommandBuilder.build_down_command("all", "/path/to/.env") + assert cmd == ["docker", "compose", "down", "--env-file", "/path/to/.env"] + + def test_build_down_command_with_compose_file(self): + cmd = DockerCommandBuilder.build_down_command("all", None, "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "down"] + + def test_build_down_command_with_all_parameters(self): + cmd = DockerCommandBuilder.build_down_command("api", "/path/to/.env", "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "down", "--env-file", "/path/to/.env", "api"] + + +class TestDownFormatter: + def setup_method(self): + self.formatter = DownFormatter() + + def test_format_output_success(self): + result = DownResult(name="web", env_file=None, verbose=False, output="text", success=True) + formatted = self.formatter.format_output(result, "text") + assert formatted == "" + + def 
test_format_output_failure(self): + result = DownResult(name="web", env_file=None, verbose=False, output="text", success=False, error="Service not found") + formatted = self.formatter.format_output(result, "text") + assert "Service not found" in formatted + + def test_format_output_json(self): + result = DownResult(name="web", env_file=None, verbose=False, output="json", success=True) + formatted = self.formatter.format_output(result, "json") + import json + + data = json.loads(formatted) + assert data["success"] is True + expected_message = services_stopped_successfully.format(services="web") + assert expected_message in data["message"] + + def test_format_output_invalid(self): + result = DownResult(name="web", env_file=None, verbose=False, output="invalid", success=True) + # The formatter doesn't validate output format, so no ValueError is raised + formatted = self.formatter.format_output(result, "invalid") + assert formatted == "" + + def test_format_dry_run_default(self): + config = DownConfig(name="all", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_mode in formatted + assert dry_run_command in formatted + assert dry_run_service.format(service="all") in formatted + + def test_format_dry_run_with_service(self): + config = DownConfig(name="web", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_service.format(service="web") in formatted + + def test_format_dry_run_with_env_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DownConfig(name="all", env_file="/path/to/.env", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_env_file.format(env_file="/path/to/.env") in formatted + + def test_format_dry_run_with_compose_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DownConfig(name="all", compose_file="/path/to/docker-compose.yml", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert "Command:" in formatted + + +class TestDockerService: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.docker_service = DockerService(self.logger) + + @patch("subprocess.run") + def test_stop_services_success(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.stop_services("web") + + assert success is True + assert error == "" + + @patch("subprocess.run") + def test_stop_services_with_env_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.stop_services("all", "/path/to/.env") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "down", "--env-file", "/path/to/.env"] + + @patch("subprocess.run") + def test_stop_services_with_compose_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.stop_services("all", None, "/path/to/docker-compose.yml") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "-f", 
"/path/to/docker-compose.yml", "down"] + + @patch("subprocess.run") + def test_stop_services_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "docker compose down", stderr="Service not found") + + success, error = self.docker_service.stop_services("web") + + assert success is False + assert error == "Service not found" + expected_error = "Service down failed: Service not found" + self.logger.error.assert_called_once_with(expected_error) + + @patch("subprocess.run") + def test_stop_services_unexpected_error(self, mock_run): + mock_run.side_effect = Exception("Unexpected error") + + success, error = self.docker_service.stop_services("web") + + assert success is False + assert error == "Unexpected error" + expected_error = "Unexpected error during down: Unexpected error" + self.logger.error.assert_called_once_with(expected_error) + + +class TestDownConfig: + def test_valid_config_default(self): + config = DownConfig() + assert config.name == "all" + assert config.env_file is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.compose_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DownConfig( + name="web", + env_file="/path/to/.env", + verbose=True, + output="json", + dry_run=True, + compose_file="/path/to/docker-compose.yml", + ) + assert config.name == "web" + assert config.env_file == "/path/to/.env" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = DownConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + DownConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = DownConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = DownConfig(env_file="") + assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = DownConfig(env_file=" ") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DownConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_compose_file_exists(self, mock_exists): + mock_exists.return_value = True + config = DownConfig(compose_file="/path/to/docker-compose.yml") + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_compose_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + DownConfig(compose_file="/path/to/docker-compose.yml") + + def test_validate_compose_file_none(self): + config = DownConfig(compose_file=None) + assert config.compose_file is None + + def test_validate_compose_file_empty(self): + config = DownConfig(compose_file="") + assert config.compose_file is None + + def test_validate_compose_file_whitespace(self): + config = DownConfig(compose_file=" ") + assert config.compose_file is None + + def 
test_validate_compose_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = DownConfig(compose_file=" /path/to/docker-compose.yml ") + assert config.compose_file == "/path/to/docker-compose.yml" + + +class TestDownService: + def setup_method(self): + self.config = DownConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + self.logger = Mock(spec=Logger) + self.docker_service = Mock() + self.service = DownService(self.config, self.logger, self.docker_service) + + def test_create_result_success(self): + result = self.service._create_result(True) + assert result.name == "web" + assert result.success is True + assert result.error is None + assert result.output == "text" + assert result.verbose is False + + def test_create_result_failure(self): + result = self.service._create_result(False, "Service not found") + assert result.success is False + assert result.error == "Service not found" + + def test_down_success(self): + self.docker_service.stop_services.return_value = (True, None) + + result = self.service.down() + + assert result.success is True + assert result.error is None + self.docker_service.stop_services.assert_called_once_with("web", None, None) + + def test_down_failure(self): + self.docker_service.stop_services.return_value = (False, "Service not found") + + result = self.service.down() + + assert result.success is False + assert result.error == "Service not found" + + def test_down_and_format_dry_run(self): + self.config.dry_run = True + formatted = self.service.down_and_format() + assert dry_run_mode in formatted + assert dry_run_command in formatted + + def test_down_and_format_success(self): + self.docker_service.stop_services.return_value = (True, "") + formatted = self.service.down_and_format() + assert formatted == "" + + +class TestDown: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.down = Down(self.logger) + + def test_down_success(self): + config = DownConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch("app.commands.service.down.DockerService") as mock_docker_service_class: + mock_docker_service = Mock() + mock_docker_service.stop_services.return_value = (True, "") + mock_docker_service_class.return_value = mock_docker_service + + result = self.down.down(config) + + assert result.success is True + assert result.error is None + assert result.name == "web" + + def test_down_failure(self): + config = DownConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch("app.commands.service.down.DockerService") as mock_docker_service_class: + mock_docker_service = Mock() + mock_docker_service.stop_services.return_value = (False, "Service not found") + mock_docker_service_class.return_value = mock_docker_service + + result = self.down.down(config) + + assert result.success is False + assert result.error == "Service not found" + + def test_format_output(self): + result = DownResult(name="web", env_file=None, verbose=False, output="text", success=True) + + formatted = self.down.format_output(result, "text") + assert formatted == "" + + +class TestDownResult: + def test_down_result_creation(self): + result = DownResult(name="web", env_file="/path/to/.env", verbose=True, output="json", success=True, error=None) + + assert result.name == "web" + assert result.env_file == "/path/to/.env" + assert result.verbose is True + assert result.output == "json" + assert result.success is True + assert result.error is 
None + + def test_down_result_default_success(self): + result = DownResult(name="web", env_file=None, verbose=False, output="text") + + assert result.name == "web" + assert result.success is False + assert result.error is None diff --git a/cli/tests/commands/service/test_ps.py b/cli/tests/commands/service/test_ps.py new file mode 100644 index 00000000..2f6799c8 --- /dev/null +++ b/cli/tests/commands/service/test_ps.py @@ -0,0 +1,364 @@ +import os +import subprocess +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.service.messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_status_failed, + services_status_retrieved, + unknown_error, +) +from app.commands.service.ps import DockerCommandBuilder, DockerService, Ps, PsConfig, PsFormatter, PsResult, PsService +from app.utils.logger import Logger + + +class TestDockerCommandBuilder: + def test_build_ps_command_default(self): + cmd = DockerCommandBuilder.build_ps_command() + assert cmd == ["docker", "compose", "config", "--format", "json"] + + def test_build_ps_command_with_service_name(self): + cmd = DockerCommandBuilder.build_ps_command("web") + assert cmd == ["docker", "compose", "config", "--format", "json"] + + def test_build_ps_command_with_env_file(self): + cmd = DockerCommandBuilder.build_ps_command("all", "/path/to/.env") + assert cmd == ["docker", "compose", "config", "--format", "json", "--env-file", "/path/to/.env"] + + def test_build_ps_command_with_compose_file(self): + cmd = DockerCommandBuilder.build_ps_command("all", None, "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "config", "--format", "json"] + + def test_build_ps_command_with_all_parameters(self): + cmd = DockerCommandBuilder.build_ps_command("api", "/path/to/.env", "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "config", "--format", "json", "--env-file", "/path/to/.env"] + + +class TestPsFormatter: + def setup_method(self): + self.formatter = PsFormatter() + + def test_format_output_success(self): + result = PsResult(name="web", env_file=None, verbose=False, output="text", success=True) + formatted = self.formatter.format_output(result, "text") + assert formatted == "No configuration found" + + def test_format_output_failure(self): + result = PsResult(name="web", env_file=None, verbose=False, output="text", success=False, error="Service not found") + formatted = self.formatter.format_output(result, "text") + assert "Service not found" in formatted + + def test_format_output_json(self): + result = PsResult(name="web", env_file=None, verbose=False, output="json", success=True) + formatted = self.formatter.format_output(result, "json") + import json + + data = json.loads(formatted) + assert data["success"] is True + expected_message = services_status_retrieved.format(services="web") + assert expected_message in data["message"] + + def test_format_output_invalid(self): + result = PsResult(name="web", env_file=None, verbose=False, output="invalid", success=True) + formatted = self.formatter.format_output(result, "invalid") + assert formatted == "No configuration found" + + def test_format_dry_run_default(self): + config = PsConfig(name="all", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_mode in formatted + assert dry_run_command in formatted + 
assert dry_run_service.format(service="all") in formatted + + def test_format_dry_run_with_service(self): + config = PsConfig(name="web", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_service.format(service="web") in formatted + + def test_format_dry_run_with_env_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = PsConfig(name="all", env_file="/path/to/.env", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_env_file.format(env_file="/path/to/.env") in formatted + + def test_format_dry_run_with_compose_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = PsConfig(name="all", compose_file="/path/to/docker-compose.yml", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert "Command:" in formatted + + +class TestDockerService: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.docker_service = DockerService(self.logger) + + @patch("subprocess.run") + def test_show_services_status_success(self, mock_run): + mock_result = Mock(returncode=0, stdout="{}", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.show_services_status("web") + + assert success is True + assert error == "{}" + + @patch("subprocess.run") + def test_show_services_status_with_env_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="{}", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.show_services_status("all", "/path/to/.env") + + assert success is True + assert error == "{}" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "config", "--format", "json", "--env-file", "/path/to/.env"] + + @patch("subprocess.run") + def test_show_services_status_with_compose_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="{}", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.show_services_status("all", None, "/path/to/docker-compose.yml") + + assert success is True + assert error == "{}" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "config", "--format", "json"] + + @patch("subprocess.run") + def test_show_services_status_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "docker compose ps", stderr="Service not found") + + success, error = self.docker_service.show_services_status("web") + + assert success is False + assert error == "Service not found" + expected_error = "Service ps failed: Service not found" + self.logger.error.assert_called_once_with(expected_error) + + @patch("subprocess.run") + def test_show_services_status_unexpected_error(self, mock_run): + mock_run.side_effect = Exception("Unexpected error") + + success, error = self.docker_service.show_services_status("web") + + assert success is False + assert error == "Unexpected error" + expected_error = "Unexpected error during ps: Unexpected error" + self.logger.error.assert_called_once_with(expected_error) + + +class TestPsConfig: + def test_valid_config_default(self): + config = PsConfig() + assert config.name == "all" + assert config.env_file is None + assert config.verbose is False + assert config.output == "text" + 
assert config.dry_run is False + assert config.compose_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = PsConfig( + name="web", + env_file="/path/to/.env", + verbose=True, + output="json", + dry_run=True, + compose_file="/path/to/docker-compose.yml", + ) + assert config.name == "web" + assert config.env_file == "/path/to/.env" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = PsConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + PsConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = PsConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = PsConfig(env_file="") + assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = PsConfig(env_file=" ") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = PsConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_compose_file_exists(self, mock_exists): + mock_exists.return_value = True + config = PsConfig(compose_file="/path/to/docker-compose.yml") + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_compose_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + PsConfig(compose_file="/path/to/docker-compose.yml") + + def test_validate_compose_file_none(self): + config = PsConfig(compose_file=None) + assert config.compose_file is None + + def test_validate_compose_file_empty(self): + config = PsConfig(compose_file="") + assert config.compose_file is None + + def test_validate_compose_file_whitespace(self): + config = PsConfig(compose_file=" ") + assert config.compose_file is None + + def test_validate_compose_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = PsConfig(compose_file=" /path/to/docker-compose.yml ") + assert config.compose_file == "/path/to/docker-compose.yml" + + +class TestPsService: + def setup_method(self): + self.config = PsConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + self.logger = Mock(spec=Logger) + self.docker_service = Mock() + self.service = PsService(self.config, self.logger, self.docker_service) + + def test_create_result_success(self): + result = self.service._create_result(True) + assert result.name == "web" + assert result.success is True + assert result.error is None + assert result.output == "text" + assert result.verbose is False + + def test_create_result_failure(self): + result = self.service._create_result(False, "Service not found") + assert result.success is False + assert result.error == "Service not found" + + def test_ps_success(self): + self.docker_service.show_services_status.return_value = (True, "{}") + + result = self.service.ps() + + assert result.success is True + assert 
result.error is None + self.docker_service.show_services_status.assert_called_once_with("web", None, None) + + def test_ps_failure(self): + self.docker_service.show_services_status.return_value = (False, "Service not found") + + result = self.service.ps() + + assert result.success is False + assert result.error == "Service not found" + + def test_ps_and_format_dry_run(self): + self.config.dry_run = True + formatted = self.service.ps_and_format() + assert dry_run_mode in formatted + assert dry_run_command in formatted + + def test_ps_and_format_success(self): + self.docker_service.show_services_status.return_value = (True, "{}") + formatted = self.service.ps_and_format() + assert formatted == "No services found in compose file" + + +class TestPs: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.ps = Ps(self.logger) + + def test_ps_success(self): + config = PsConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch( + "app.commands.service.ps.PsService.execute", + return_value=PsResult( + name=config.name, env_file=config.env_file, verbose=config.verbose, output=config.output, success=True + ), + ): + result = self.ps.ps(config) + assert result.success is True + + def test_ps_failure(self): + config = PsConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch( + "app.commands.service.ps.PsService.execute", + return_value=PsResult( + name=config.name, + env_file=config.env_file, + verbose=config.verbose, + output=config.output, + success=False, + error="Service not found", + ), + ): + result = self.ps.ps(config) + assert result.success is False + assert result.error == "Service not found" + + def test_format_output(self): + result = PsResult(name="web", env_file=None, verbose=False, output="text", success=True) + + formatted = self.ps.format_output(result, "text") + assert formatted == "No configuration found" + + +class TestPsResult: + def test_ps_result_creation(self): + result = PsResult(name="web", env_file="/path/to/.env", verbose=True, output="json", success=True, error=None) + + assert result.name == "web" + assert result.env_file == "/path/to/.env" + assert result.verbose is True + assert result.output == "json" + assert result.success is True + assert result.error is None + + def test_ps_result_default_success(self): + result = PsResult(name="web", env_file=None, verbose=False, output="text") + + assert result.name == "web" + assert result.success is False + assert result.error is None diff --git a/cli/tests/commands/service/test_restart.py b/cli/tests/commands/service/test_restart.py new file mode 100644 index 00000000..54bf1f6e --- /dev/null +++ b/cli/tests/commands/service/test_restart.py @@ -0,0 +1,383 @@ +import os +import subprocess +from unittest.mock import Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.service.messages import ( + dry_run_command, + dry_run_command_would_be_executed, + dry_run_env_file, + dry_run_mode, + dry_run_service, + end_dry_run, + service_restart_failed, + services_restarted_successfully, + unknown_error, +) +from app.commands.service.restart import ( + DockerCommandBuilder, + DockerService, + Restart, + RestartConfig, + RestartFormatter, + RestartResult, + RestartService, +) +from app.utils.logger import Logger + + +class TestDockerCommandBuilder: + def test_build_restart_command_default(self): + cmd = DockerCommandBuilder.build_restart_command() + assert cmd == ["docker", "compose", "restart"] + + def 
test_build_restart_command_with_service_name(self): + cmd = DockerCommandBuilder.build_restart_command("web") + assert cmd == ["docker", "compose", "restart", "web"] + + def test_build_restart_command_with_env_file(self): + cmd = DockerCommandBuilder.build_restart_command("all", "/path/to/.env") + assert cmd == ["docker", "compose", "restart", "--env-file", "/path/to/.env"] + + def test_build_restart_command_with_compose_file(self): + cmd = DockerCommandBuilder.build_restart_command("all", None, "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "restart"] + + def test_build_restart_command_with_all_parameters(self): + cmd = DockerCommandBuilder.build_restart_command("api", "/path/to/.env", "/path/to/docker-compose.yml") + assert cmd == [ + "docker", + "compose", + "-f", + "/path/to/docker-compose.yml", + "restart", + "--env-file", + "/path/to/.env", + "api", + ] + + +class TestRestartFormatter: + def setup_method(self): + self.formatter = RestartFormatter() + + def test_format_output_success(self): + result = RestartResult(name="web", env_file=None, verbose=False, output="text", success=True) + formatted = self.formatter.format_output(result, "text") + assert formatted == "" + + def test_format_output_failure(self): + result = RestartResult( + name="web", env_file=None, verbose=False, output="text", success=False, error="Service not found" + ) + formatted = self.formatter.format_output(result, "text") + assert "Service not found" in formatted + + def test_format_output_json(self): + result = RestartResult(name="web", env_file=None, verbose=False, output="json", success=True) + formatted = self.formatter.format_output(result, "json") + import json + + data = json.loads(formatted) + assert data["success"] is True + expected_message = services_restarted_successfully.format(services="web") + assert expected_message in data["message"] + + def test_format_output_invalid(self): + result = RestartResult(name="web", env_file=None, verbose=False, output="invalid", success=True) + formatted = self.formatter.format_output(result, "invalid") + assert formatted == "" + + def test_format_dry_run_default(self): + config = RestartConfig(name="all", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_mode in formatted + assert dry_run_command in formatted + assert dry_run_service.format(service="all") in formatted + + def test_format_dry_run_with_service(self): + config = RestartConfig(name="web", env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_service.format(service="web") in formatted + + def test_format_dry_run_with_env_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = RestartConfig(name="all", env_file="/path/to/.env", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_env_file.format(env_file="/path/to/.env") in formatted + + def test_format_dry_run_with_compose_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = RestartConfig(name="all", compose_file="/path/to/docker-compose.yml", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert "Command:" in formatted + + +class TestDockerService: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.docker_service = 
DockerService(self.logger) + + @patch("subprocess.run") + def test_restart_services_success(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.restart_services("web") + + assert success is True + assert error == "" + + @patch("subprocess.run") + def test_restart_services_with_env_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.restart_services("all", "/path/to/.env") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "restart", "--env-file", "/path/to/.env"] + + @patch("subprocess.run") + def test_restart_services_with_compose_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.restart_services("all", None, "/path/to/docker-compose.yml") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "restart"] + + @patch("subprocess.run") + def test_restart_services_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "docker compose restart", stderr="Service not found") + + success, error = self.docker_service.restart_services("web") + + assert success is False + assert error == "Service not found" + expected_error = "Service restart failed: Service not found" + self.logger.error.assert_called_once_with(expected_error) + + @patch("subprocess.run") + def test_restart_services_unexpected_error(self, mock_run): + mock_run.side_effect = Exception("Unexpected error") + + success, error = self.docker_service.restart_services("web") + + assert success is False + assert error == "Unexpected error" + expected_error = "Unexpected error during restart: Unexpected error" + self.logger.error.assert_called_once_with(expected_error) + + +class TestRestartConfig: + def test_valid_config_default(self): + config = RestartConfig() + assert config.name == "all" + assert config.env_file is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + assert config.compose_file is None + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = RestartConfig( + name="web", + env_file="/path/to/.env", + verbose=True, + output="json", + dry_run=True, + compose_file="/path/to/docker-compose.yml", + ) + assert config.name == "web" + assert config.env_file == "/path/to/.env" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = RestartConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + RestartConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = RestartConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = RestartConfig(env_file="") + 
assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = RestartConfig(env_file=" ") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = RestartConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_compose_file_exists(self, mock_exists): + mock_exists.return_value = True + config = RestartConfig(compose_file="/path/to/docker-compose.yml") + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_compose_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + RestartConfig(compose_file="/path/to/docker-compose.yml") + + def test_validate_compose_file_none(self): + config = RestartConfig(compose_file=None) + assert config.compose_file is None + + def test_validate_compose_file_empty(self): + config = RestartConfig(compose_file="") + assert config.compose_file is None + + def test_validate_compose_file_whitespace(self): + config = RestartConfig(compose_file=" ") + assert config.compose_file is None + + def test_validate_compose_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = RestartConfig(compose_file=" /path/to/docker-compose.yml ") + assert config.compose_file == "/path/to/docker-compose.yml" + + +class TestRestartService: + def setup_method(self): + self.config = RestartConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + self.logger = Mock(spec=Logger) + self.docker_service = Mock() + self.service = RestartService(self.config, self.logger, self.docker_service) + + def test_create_result_success(self): + result = self.service._create_result(True) + assert result.name == "web" + assert result.success is True + assert result.error is None + assert result.output == "text" + assert result.verbose is False + + def test_create_result_failure(self): + result = self.service._create_result(False, "Service not found") + assert result.success is False + assert result.error == "Service not found" + + def test_restart_success(self): + self.docker_service.restart_services.return_value = (True, "") + + result = self.service.restart() + + assert result.success is True + assert result.error is None + self.docker_service.restart_services.assert_called_once_with("web", None, None) + + def test_restart_failure(self): + self.docker_service.restart_services.return_value = (False, "Service not found") + + result = self.service.restart() + + assert result.success is False + assert result.error == "Service not found" + + def test_restart_and_format_dry_run(self): + self.config.dry_run = True + formatted = self.service.restart_and_format() + assert dry_run_mode in formatted + assert dry_run_command in formatted + + def test_restart_and_format_success(self): + self.docker_service.restart_services.return_value = (True, "") + formatted = self.service.restart_and_format() + assert formatted == "" + + +class TestRestart: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.restart = Restart(self.logger) + + def test_restart_success(self): + config = RestartConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch( + "app.commands.service.restart.RestartService.execute", + return_value=RestartResult( + name=config.name, env_file=config.env_file, 
verbose=config.verbose, output=config.output, success=True + ), + ): + result = self.restart.restart(config) + assert result.success is True + + def test_restart_failure(self): + config = RestartConfig(name="web", env_file=None, verbose=False, output="text", dry_run=False) + + with patch( + "app.commands.service.restart.RestartService.execute", + return_value=RestartResult( + name=config.name, + env_file=config.env_file, + verbose=config.verbose, + output=config.output, + success=False, + error="Service not found", + ), + ): + result = self.restart.restart(config) + assert result.success is False + assert result.error == "Service not found" + + def test_format_output(self): + result = RestartResult(name="web", env_file=None, verbose=False, output="text", success=True) + + formatted = self.restart.format_output(result, "text") + assert formatted == "" + + +class TestRestartResult: + def test_restart_result_creation(self): + result = RestartResult(name="web", env_file="/path/to/.env", verbose=True, output="json", success=True, error=None) + + assert result.name == "web" + assert result.env_file == "/path/to/.env" + assert result.verbose is True + assert result.output == "json" + assert result.success is True + assert result.error is None + + def test_restart_result_default_success(self): + result = RestartResult(name="web", env_file=None, verbose=False, output="text") + + assert result.name == "web" + assert result.success is False + assert result.error is None diff --git a/cli/tests/commands/service/test_up.py b/cli/tests/commands/service/test_up.py new file mode 100644 index 00000000..45ea9c4e --- /dev/null +++ b/cli/tests/commands/service/test_up.py @@ -0,0 +1,361 @@ +import os +import subprocess +from unittest.mock import MagicMock, Mock, patch + +import pytest +from pydantic import ValidationError + +from app.commands.service.messages import ( + dry_run_command, + dry_run_detach_mode, + dry_run_env_file, + dry_run_mode, + dry_run_service, + services_started_successfully, +) +from app.commands.service.up import DockerCommandBuilder, DockerService, Up, UpConfig, UpFormatter, UpResult, UpService +from app.utils.logger import Logger + + +class TestDockerCommandBuilder: + def test_build_up_command_default(self): + cmd = DockerCommandBuilder.build_up_command() + assert cmd == ["docker", "compose", "up", "-d"] + + def test_build_up_command_with_service_name(self): + cmd = DockerCommandBuilder.build_up_command("web") + assert cmd == ["docker", "compose", "up", "-d", "web"] + + def test_build_up_command_without_detach(self): + cmd = DockerCommandBuilder.build_up_command("all", detach=False) + assert cmd == ["docker", "compose", "up"] + + def test_build_up_command_with_env_file(self): + cmd = DockerCommandBuilder.build_up_command("all", True, "/path/to/.env") + assert cmd == ["docker", "compose", "up", "-d", "--env-file", "/path/to/.env"] + + def test_build_up_command_with_compose_file(self): + cmd = DockerCommandBuilder.build_up_command("all", True, None, "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "up", "-d"] + + def test_build_up_command_with_all_parameters(self): + cmd = DockerCommandBuilder.build_up_command("api", False, "/path/to/.env", "/path/to/docker-compose.yml") + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "up", "--env-file", "/path/to/.env", "api"] + + +class TestUpFormatter: + def setup_method(self): + self.formatter = UpFormatter() + + def test_format_output_success(self): + result = 
UpResult(name="web", detach=True, env_file=None, verbose=False, output="text", success=True) + formatted = self.formatter.format_output(result, "text") + assert formatted == "" + + def test_format_output_failure(self): + result = UpResult( + name="web", detach=True, env_file=None, verbose=False, output="text", success=False, error="Service not found" + ) + formatted = self.formatter.format_output(result, "text") + assert "Service not found" in formatted + + def test_format_output_json(self): + result = UpResult(name="web", detach=True, env_file=None, verbose=False, output="json", success=True) + formatted = self.formatter.format_output(result, "json") + import json + + data = json.loads(formatted) + assert data["success"] is True + expected_message = services_started_successfully.format(services="web") + assert expected_message in data["message"] + + def test_format_output_invalid(self): + result = UpResult(name="web", detach=True, env_file=None, verbose=False, output="invalid", success=True) + # The formatter doesn't validate output format, so no ValueError is raised + formatted = self.formatter.format_output(result, "invalid") + assert formatted == "" + + def test_format_dry_run_default(self): + config = UpConfig(name="all", detach=True, env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_mode in formatted + assert dry_run_command in formatted + assert dry_run_service.format(service="all") in formatted + assert dry_run_detach_mode.format(detach=True) in formatted + + def test_format_dry_run_with_service(self): + config = UpConfig(name="web", detach=False, env_file=None, dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_service.format(service="web") in formatted + assert dry_run_detach_mode.format(detach=False) in formatted + + def test_format_dry_run_with_env_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig(name="all", detach=True, env_file="/path/to/.env", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert dry_run_env_file.format(env_file="/path/to/.env") in formatted + + def test_format_dry_run_with_compose_file(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig(name="all", detach=True, compose_file="/path/to/docker-compose.yml", dry_run=True) + formatted = self.formatter.format_dry_run(config) + assert dry_run_command in formatted + assert "Command:" in formatted + + +class TestDockerService: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.docker_service = DockerService(self.logger) + + @patch("subprocess.run") + def test_start_services_success(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.start_services("web", detach=True) + + assert success is True + assert error == "" + + @patch("subprocess.run") + def test_start_services_with_env_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.start_services("all", True, "/path/to/.env") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "up", "-d", "--env-file", "/path/to/.env"] + + @patch("subprocess.run") + def 
test_start_services_with_compose_file(self, mock_run): + mock_result = Mock(returncode=0, stdout="", stderr="") + mock_run.return_value = mock_result + + success, error = self.docker_service.start_services("all", True, None, "/path/to/docker-compose.yml") + + assert success is True + assert error == "" + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert cmd == ["docker", "compose", "-f", "/path/to/docker-compose.yml", "up", "-d"] + + @patch("subprocess.run") + def test_start_services_failure(self, mock_run): + mock_run.side_effect = subprocess.CalledProcessError(1, "docker compose", stderr="Service not found") + success, error = self.docker_service.start_services("web", detach=True) + assert success is False + assert error == "Service not found" + + @patch("subprocess.run") + def test_start_services_unexpected_error(self, mock_run): + mock_run.side_effect = Exception("Unexpected error") + success, error = self.docker_service.start_services("web", detach=True) + assert success is False + assert error == "Unexpected error" + + +class TestUpConfig: + def test_valid_config_default(self): + config = UpConfig() + assert config.name == "all" + assert config.detach is False + assert config.env_file is None + assert config.verbose is False + assert config.output == "text" + assert config.dry_run is False + + def test_valid_config_custom(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig( + name="web", + detach=False, + env_file="/path/to/.env", + verbose=True, + output="json", + dry_run=True, + compose_file="/path/to/docker-compose.yml", + ) + assert config.name == "web" + assert config.detach is False + assert config.env_file == "/path/to/.env" + assert config.verbose is True + assert config.output == "json" + assert config.dry_run is True + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_env_file_exists(self, mock_exists): + mock_exists.return_value = True + config = UpConfig(env_file="/path/to/.env") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_env_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + UpConfig(env_file="/path/to/.env") + + def test_validate_env_file_none(self): + config = UpConfig(env_file=None) + assert config.env_file is None + + def test_validate_env_file_empty(self): + config = UpConfig(env_file="") + assert config.env_file is None + + def test_validate_env_file_whitespace(self): + config = UpConfig(env_file=" ") + assert config.env_file is None + + def test_validate_env_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig(env_file=" /path/to/.env ") + assert config.env_file == "/path/to/.env" + + @patch("os.path.exists") + def test_validate_compose_file_exists(self, mock_exists): + mock_exists.return_value = True + config = UpConfig(compose_file="/path/to/docker-compose.yml") + assert config.compose_file == "/path/to/docker-compose.yml" + + @patch("os.path.exists") + def test_validate_compose_file_not_exists(self, mock_exists): + mock_exists.return_value = False + with pytest.raises(ValidationError): + UpConfig(compose_file="/path/to/docker-compose.yml") + + def test_validate_compose_file_none(self): + config = UpConfig(compose_file=None) + assert config.compose_file is None + + def test_validate_compose_file_empty(self): + config = UpConfig(compose_file="") + 
assert config.compose_file is None + + def test_validate_compose_file_whitespace(self): + config = UpConfig(compose_file=" ") + assert config.compose_file is None + + def test_validate_compose_file_stripped(self): + with patch("os.path.exists") as mock_exists: + mock_exists.return_value = True + config = UpConfig(compose_file=" /path/to/docker-compose.yml ") + assert config.compose_file == "/path/to/docker-compose.yml" + + +class TestUpService: + def setup_method(self): + self.config = UpConfig(name="web", detach=True, env_file=None, compose_file=None) + self.logger = Mock(spec=Logger) + self.docker_service = Mock() + self.service = UpService(self.config, self.logger, self.docker_service) + + def test_create_result_success(self): + result = self.service._create_result(True) + + assert result.name == self.config.name + assert result.detach == self.config.detach + assert result.env_file == self.config.env_file + assert result.verbose == self.config.verbose + assert result.output == self.config.output + assert result.success is True + assert result.error is None + + def test_create_result_failure(self): + result = self.service._create_result(False, "Test error") + + assert result.success is False + assert result.error == "Test error" + + def test_up_success(self): + self.docker_service.start_services.return_value = (True, None) + + result = self.service.up() + + assert result.success is True + self.docker_service.start_services.assert_called_once_with( + self.config.name, self.config.detach, self.config.env_file, self.config.compose_file + ) + + def test_up_failure(self): + self.docker_service.start_services.return_value = (False, "Test error") + + result = self.service.up() + + assert result.success is False + assert result.error == "Test error" + + def test_up_and_format_dry_run(self): + self.config.dry_run = True + + result = self.service.up_and_format() + + assert dry_run_mode in result + + def test_up_and_format_success(self): + self.docker_service.start_services.return_value = (True, "") + + result = self.service.up_and_format() + + assert result == "" + + +class TestUp: + def setup_method(self): + self.logger = Mock(spec=Logger) + self.up = Up(self.logger) + + def test_up_success(self): + config = UpConfig(name="web", detach=True, env_file=None) + with patch( + "app.commands.service.up.UpService.execute", + return_value=UpResult( + name=config.name, + detach=config.detach, + env_file=config.env_file, + verbose=config.verbose, + output=config.output, + success=True, + ), + ): + result = self.up.up(config) + assert result.success is True + + def test_format_output(self): + result = UpResult(name="web", detach=True, env_file=None, verbose=False, output="text", success=True) + + formatted = self.up.format_output(result, "text") + + assert formatted == "" + + +class TestUpResult: + def test_up_result_creation(self): + result = UpResult( + name="web", detach=True, env_file="/path/to/.env", verbose=True, output="json", success=True, error=None + ) + + assert result.name == "web" + assert result.detach is True + assert result.env_file == "/path/to/.env" + assert result.verbose is True + assert result.output == "json" + assert result.success is True + assert result.error is None + + def test_up_result_default_success(self): + result = UpResult(name="web", detach=True, env_file=None, verbose=False, output="text") + + assert result.success is False + assert result.error is None diff --git a/cli/tests/commands/version/__init__.py b/cli/tests/commands/version/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/cli/tests/commands/version/test_version.py b/cli/tests/commands/version/test_version.py new file mode 100644 index 00000000..2eaf4f6d --- /dev/null +++ b/cli/tests/commands/version/test_version.py @@ -0,0 +1,178 @@ +from importlib.metadata import version +from unittest.mock import MagicMock, patch + +import pytest + +from app.commands.version.version import VersionCommand + + +class TestVersionCommand: + """Test cases for the VersionCommand class""" + + @patch("app.commands.version.version.Console") + @patch("app.commands.version.version.version") + def test_version_command_success(self, mock_version, mock_console_class): + """Test successful version display""" + mock_version.return_value = "1.0.0" + mock_console = MagicMock() + mock_console_class.return_value = mock_console + + version_command = VersionCommand() + version_command.run() + + mock_version.assert_called_once_with("nixopus") + mock_console.print.assert_called_once() + + call_args = mock_console.print.call_args[0][0] + assert call_args.title == "[bold white]Version Info[/bold white]" + assert call_args.border_style == "blue" + assert call_args.padding == (0, 1) + + @patch("app.commands.version.version.Console") + @patch("app.commands.version.version.version") + def test_version_command_with_different_versions(self, mock_version, mock_console_class): + """Test version display with different version numbers""" + test_versions = ["0.1.0", "2.3.4", "1.0.0-beta"] + mock_console = MagicMock() + mock_console_class.return_value = mock_console + + for test_version in test_versions: + mock_version.return_value = test_version + mock_console.reset_mock() + + version_command = VersionCommand() + version_command.run() + + mock_version.assert_called_with("nixopus") + mock_console.print.assert_called_once() + + @patch("app.commands.version.version.Console") + @patch("app.commands.version.version.version") + def test_version_command_panel_content(self, mock_version, mock_console_class): + """Test that panel contains correct text content""" + mock_version.return_value = "1.2.3" + mock_console = MagicMock() + mock_console_class.return_value = mock_console + + version_command = VersionCommand() + version_command.run() + + call_args = mock_console.print.call_args[0][0] + panel_content = call_args.renderable + + assert "Nixopus CLI" in str(panel_content) + assert "v1.2.3" in str(panel_content) + + @patch("app.commands.version.version.Console") + @patch("app.commands.version.version.version") + def test_version_command_handles_version_error(self, mock_version, mock_console_class): + """Test handling of version import error""" + mock_version.side_effect = Exception("Version not found") + mock_console = MagicMock() + mock_console_class.return_value = mock_console + + with pytest.raises(Exception): + version_command = VersionCommand() + version_command.run() + + mock_version.assert_called_once_with("nixopus") + + @patch("app.commands.version.version.Console") + @patch("app.commands.version.version.version") + def test_version_command_console_error_handling(self, mock_version, mock_console_class): + """Test handling of console print errors""" + mock_version.return_value = "1.0.0" + mock_console = MagicMock() + mock_console.print.side_effect = Exception("Console error") + mock_console_class.return_value = mock_console + + with pytest.raises(Exception): + version_command = VersionCommand() + version_command.run() + + mock_version.assert_called_once_with("nixopus") + mock_console.print.assert_called_once() + + +class 
TestVersionCommandClass: + """Test cases for VersionCommand class structure""" + + def test_version_command_initialization(self): + """Test that VersionCommand can be instantiated""" + with patch("app.commands.version.version.Console"): + version_command = VersionCommand() + assert hasattr(version_command, "console") + + def test_version_command_run_method(self): + """Test that VersionCommand has a run method""" + with patch("app.commands.version.version.Console"): + version_command = VersionCommand() + assert hasattr(version_command, "run") + assert callable(version_command.run) + + def test_version_command_run_returns_none(self): + """Test that run method returns None""" + with patch("app.commands.version.version.Console"): + with patch("app.commands.version.version.version", return_value="1.0.0"): + version_command = VersionCommand() + result = version_command.run() + assert result is None + + +class TestVersionModuleImports: + """Test cases for module imports and dependencies""" + + def test_import_metadata_version(self): + """Test that importlib.metadata.version is available""" + try: + from importlib.metadata import version + + assert callable(version) + except ImportError: + pytest.skip("importlib.metadata not available") + + def test_rich_console_import(self): + """Test that rich.console.Console is available""" + try: + from rich.console import Console + + assert callable(Console) + except ImportError: + pytest.skip("rich.console not available") + + def test_rich_panel_import(self): + """Test that rich.panel.Panel is available""" + try: + from rich.panel import Panel + + assert callable(Panel) + except ImportError: + pytest.skip("rich.panel not available") + + def test_rich_text_import(self): + """Test that rich.text.Text is available""" + try: + from rich.text import Text + + assert callable(Text) + except ImportError: + pytest.skip("rich.text not available") + + +class TestVersionCommandSignature: + """Test cases for class method signature and behavior""" + + def test_version_command_is_instantiable(self): + """Test that VersionCommand can be instantiated""" + with patch("app.commands.version.version.Console"): + version_command = VersionCommand() + assert isinstance(version_command, VersionCommand) + + def test_run_method_no_parameters(self): + """Test that run method takes no parameters""" + import inspect + + with patch("app.commands.version.version.Console"): + version_command = VersionCommand() + sig = inspect.signature(version_command.run) + assert len(sig.parameters) == 0 diff --git a/cli/tests/utils/__init__.py b/cli/tests/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli/tests/utils/test_config.py b/cli/tests/utils/test_config.py new file mode 100644 index 00000000..20453402 --- /dev/null +++ b/cli/tests/utils/test_config.py @@ -0,0 +1,406 @@ +import os +import sys +import tempfile +import unittest +from unittest.mock import Mock, patch, mock_open + +from app.utils.config import Config, expand_env_placeholders +from app.utils.message import MISSING_CONFIG_KEY_MESSAGE + +class TestConfig(unittest.TestCase): + def setUp(self): + self.temp_dir = tempfile.mkdtemp() + self.test_config_path = os.path.join(self.temp_dir, "test_config.yaml") + self.sample_config = { + "services": { + "api": { + "env": { + "PORT": "${API_PORT:-8443}", + "DB_NAME": "${DB_NAME:-postgres}" + } + }, + "view": { + "env": { + "PORT": "${VIEW_PORT:-7443}" + } + } + }, + "clone": { + "repo": "https://github.com/test/repo", + "branch": "main", + "source-path": "/tmp/source" 
+ }, + "deps": { + "curl": {"package": "curl", "command": "curl"}, + "docker": {"package": "docker.io", "command": "docker"} + }, + "ports": [2019, 80, 443, 7443, 8443] + } + + def tearDown(self): + import shutil + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def test_get_env_default(self): + if "ENV" in os.environ: + del os.environ["ENV"] + config = Config() + self.assertEqual(config.get_env(), "PRODUCTION") + + @patch('os.environ.get') + def test_get_env_custom(self, mock_environ_get): + mock_environ_get.return_value = "DEVELOPMENT" + config = Config() + self.assertEqual(config.get_env(), "DEVELOPMENT") + + @patch('os.environ.get') + def test_is_development_true(self, mock_environ_get): + mock_environ_get.return_value = "DEVELOPMENT" + config = Config() + self.assertTrue(config.is_development()) + + @patch('os.environ.get') + def test_is_development_false(self, mock_environ_get): + mock_environ_get.return_value = "PRODUCTION" + config = Config() + self.assertFalse(config.is_development()) + + @patch('os.environ.get') + def test_is_development_case_insensitive(self, mock_environ_get): + mock_environ_get.return_value = "development" + config = Config() + self.assertTrue(config.is_development()) + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + def test_load_yaml_config_success(self, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + config = Config() + result = config.load_yaml_config() + self.assertEqual(result, self.sample_config) + mock_file.assert_called_once() + + @patch('builtins.open') + def test_load_yaml_config_file_not_found(self, mock_open): + mock_open.side_effect = FileNotFoundError("File not found") + config = Config() + with self.assertRaises(FileNotFoundError): + config.load_yaml_config() + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + def test_load_yaml_config_cached(self, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + config = Config() + result1 = config.load_yaml_config() + result2 = config.load_yaml_config() + self.assertEqual(result1, result2) + self.assertEqual(mock_yaml_load.call_count, 1) + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + @patch('app.utils.config.expand_env_placeholders') + def test_get_yaml_value_success(self, mock_expand, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + mock_expand.return_value = "8443" + config = Config() + result = config.get_yaml_value("services.api.env.PORT") + self.assertEqual(result, "8443") + mock_expand.assert_called_once_with("${API_PORT:-8443}") + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + def test_get_yaml_value_non_string(self, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + config = Config() + result = config.get_yaml_value("ports") + self.assertEqual(result, [2019, 80, 443, 7443, 8443]) + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + def test_get_yaml_value_missing_key(self, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + config = Config() + with self.assertRaises(KeyError) as context: + config.get_yaml_value("services.api.env.NONEXISTENT") + expected_message = MISSING_CONFIG_KEY_MESSAGE.format( + path="services.api.env.NONEXISTENT", + key="NONEXISTENT" + ) + self.assertEqual(context.exception.args[0], expected_message) + + @patch('builtins.open', new_callable=mock_open) + 
@patch('yaml.safe_load') + def test_get_yaml_value_missing_path(self, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + config = Config() + with self.assertRaises(KeyError) as context: + config.get_yaml_value("nonexistent.path") + expected_message = MISSING_CONFIG_KEY_MESSAGE.format( + path="nonexistent.path", + key="nonexistent" + ) + self.assertEqual(context.exception.args[0], expected_message) + + @patch('builtins.open', new_callable=mock_open) + @patch('yaml.safe_load') + @patch('app.utils.config.expand_env_placeholders') + def test_get_service_env_values(self, mock_expand, mock_yaml_load, mock_file): + mock_yaml_load.return_value = self.sample_config + mock_expand.side_effect = lambda x: x.replace("${API_PORT:-8443}", "8443") + config = Config() + result = config.get_service_env_values("services.api.env") + expected = { + "PORT": "8443", + "DB_NAME": "${DB_NAME:-postgres}" + } + self.assertEqual(result, expected) + + @patch('yaml.safe_load') + def test_load_user_config_success(self, mock_yaml_load): + user_config = { + "services": { + "api": { + "env": { + "PORT": "9000" + } + } + } + } + mock_yaml_load.return_value = user_config + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f: + f.write("dummy content") + config_file = f.name + try: + config = Config() + result = config.load_user_config(config_file) + expected = { + "services.api.env.PORT": "9000" + } + self.assertEqual(result, expected) + finally: + os.unlink(config_file) + + def test_load_user_config_empty_file(self): + config = Config() + result = config.load_user_config(None) + self.assertEqual(result, {}) + + def test_load_user_config_file_not_found(self): + config = Config() + with self.assertRaises(FileNotFoundError) as context: + config.load_user_config("/nonexistent/file.yaml") + self.assertIn("Config file not found", str(context.exception)) + + def test_flatten_config_simple(self): + config = Config() + nested = {"a": 1, "b": 2} + flattened = {} + config.flatten_config(nested, flattened) + self.assertEqual(flattened, {"a": 1, "b": 2}) + + def test_flatten_config_nested(self): + config = Config() + nested = { + "services": { + "api": { + "env": { + "PORT": "8443" + } + } + } + } + flattened = {} + config.flatten_config(nested, flattened) + expected = { + "services.api.env.PORT": "8443" + } + self.assertEqual(flattened, expected) + + def test_flatten_config_with_prefix(self): + config = Config() + nested = {"a": 1} + flattened = {} + config.flatten_config(nested, flattened, "prefix") + self.assertEqual(flattened, {"prefix.a": 1}) + + def test_unflatten_config_simple(self): + config = Config() + flattened = {"a": 1, "b": 2} + result = config.unflatten_config(flattened) + expected = {"a": 1, "b": 2} + self.assertEqual(result, expected) + + def test_unflatten_config_nested(self): + config = Config() + flattened = {"a.b.c": 1, "a.b.d": 2, "a.e": 3, "f": 4} + result = config.unflatten_config(flattened) + expected = {"a": {"b": {"c": 1, "d": 2}, "e": 3}, "f": 4} + self.assertEqual(result, expected) + + def test_get_config_value_cached(self): + config = Config() + user_config = {"test.key": "value"} + defaults = {"test.key": "default"} + result1 = config.get_config_value("test.key", user_config, defaults) + result2 = config.get_config_value("test.key", user_config, defaults) + self.assertEqual(result1, "value") + self.assertEqual(result2, "value") + self.assertEqual(result1, result2) + + def test_get_config_value_user_config_priority(self): + config = Config() + 
user_config = {"services.caddy.env.PROXY_PORT": "2020"} + defaults = {"proxy_port": "2019"} + result = config.get_config_value("proxy_port", user_config, defaults) + self.assertEqual(result, "2020") + + def test_get_config_value_defaults_fallback(self): + config = Config() + user_config = {} + defaults = {"proxy_port": "2019"} + result = config.get_config_value("proxy_port", user_config, defaults) + self.assertEqual(result, "2019") + + def test_get_config_value_missing_no_default(self): + config = Config() + user_config = {} + defaults = {} + with self.assertRaises(ValueError) as context: + config.get_config_value("missing_key", user_config, defaults) + self.assertIn("Configuration key 'missing_key' has no default value", str(context.exception)) + + def test_get_config_value_ssh_passphrase_optional(self): + config = Config() + user_config = {} + defaults = {} + result = config.get_config_value("ssh_passphrase", user_config, defaults) + self.assertIsNone(result) + + def test_get_config_value_key_mappings(self): + config = Config() + user_config = { + "clone.repo": "https://github.com/test/repo", + "clone.branch": "main", + "clone.source-path": "/tmp/source" + } + defaults = {} + repo_result = config.get_config_value("repo_url", user_config, defaults) + branch_result = config.get_config_value("branch_name", user_config, defaults) + path_result = config.get_config_value("source_path", user_config, defaults) + self.assertEqual(repo_result, "https://github.com/test/repo") + self.assertEqual(branch_result, "main") + self.assertEqual(path_result, "/tmp/source") + + def test_config_pyinstaller_bundle(self): + sys.frozen = True + sys._MEIPASS = "/bundle" + with patch('os.path.join') as mock_join: + mock_join.return_value = "/bundle/helpers/config.prod.yaml" + config = Config() + self.assertEqual(config._yaml_path, "/bundle/helpers/config.prod.yaml") + del sys.frozen + del sys._MEIPASS + + def test_config_normal_python(self): + if hasattr(sys, 'frozen'): + del sys.frozen + if hasattr(sys, '_MEIPASS'): + del sys._MEIPASS + with patch('os.path.abspath') as mock_abspath: + mock_abspath.return_value = "/normal/path/helpers/config.prod.yaml" + config = Config() + self.assertNotIn("_MEIPASS", config._yaml_path) + +class TestExpandEnvPlaceholders(unittest.TestCase): + def setUp(self): + self.original_environ = os.environ.copy() + + def tearDown(self): + os.environ.clear() + os.environ.update(self.original_environ) + + def test_expand_env_placeholders_no_placeholders(self): + result = expand_env_placeholders("simple string") + self.assertEqual(result, "simple string") + + def test_expand_env_placeholders_simple_variable(self): + os.environ["TEST_VAR"] = "test_value" + result = expand_env_placeholders("${TEST_VAR}") + self.assertEqual(result, "test_value") + + def test_expand_env_placeholders_with_default(self): + result = expand_env_placeholders("${TEST_VAR:-default_value}") + self.assertEqual(result, "default_value") + + def test_expand_env_placeholders_variable_overrides_default(self): + os.environ["TEST_VAR"] = "actual_value" + result = expand_env_placeholders("${TEST_VAR:-default_value}") + self.assertEqual(result, "actual_value") + + def test_expand_env_placeholders_multiple_placeholders(self): + os.environ["VAR1"] = "value1" + os.environ["VAR2"] = "value2" + result = expand_env_placeholders("${VAR1} and ${VAR2}") + self.assertEqual(result, "value1 and value2") + + def test_expand_env_placeholders_mixed_content(self): + os.environ["PORT"] = "8443" + result = 
expand_env_placeholders("http://localhost:${PORT:-8080}/api") + self.assertEqual(result, "http://localhost:8443/api") + + def test_expand_env_placeholders_empty_default(self): + result = expand_env_placeholders("${TEST_VAR:-}") + self.assertEqual(result, "") + + def test_expand_env_placeholders_complex_default(self): + result = expand_env_placeholders("${TEST_VAR:-http://localhost:8080}") + self.assertEqual(result, "http://localhost:8080") + + def test_expand_env_placeholders_special_characters_in_default(self): + result = expand_env_placeholders("${TEST_VAR:-/path/with/special/chars}") + self.assertEqual(result, "/path/with/special/chars") + + def test_expand_env_placeholders_numeric_default(self): + result = expand_env_placeholders("${TEST_VAR:-123}") + self.assertEqual(result, "123") + + def test_expand_env_placeholders_underscore_in_variable_name(self): + os.environ["TEST_VAR_NAME"] = "test_value" + result = expand_env_placeholders("${TEST_VAR_NAME}") + self.assertEqual(result, "test_value") + + def test_expand_env_placeholders_case_sensitive(self): + os.environ["test_var"] = "lowercase" + os.environ["TEST_VAR"] = "uppercase" + result = expand_env_placeholders("${test_var} and ${TEST_VAR}") + self.assertEqual(result, "lowercase and uppercase") + + def test_expand_env_placeholders_invalid_variable_name(self): + result = expand_env_placeholders("${123INVALID}") + self.assertEqual(result, "${123INVALID}") + + def test_expand_env_placeholders_malformed_placeholder(self): + result = expand_env_placeholders("${MISSING_BRACE") + self.assertEqual(result, "${MISSING_BRACE") + + def test_expand_env_placeholders_empty_variable_name(self): + result = expand_env_placeholders("${}") + self.assertEqual(result, "${}") + + def test_expand_env_placeholders_nested_braces(self): + result = expand_env_placeholders("${TEST_VAR:-{nested}}") + self.assertEqual(result, "{nested}") + + def test_expand_env_placeholders_multiple_defaults(self): + result = expand_env_placeholders("${VAR1:-default1} and ${VAR2:-default2}") + self.assertEqual(result, "default1 and default2") + + def test_expand_env_placeholders_real_world_example(self): + os.environ["API_PORT"] = "9000" + os.environ["DB_NAME"] = "production_db" + result = expand_env_placeholders("${API_PORT:-8443} and ${DB_NAME:-postgres}") + self.assertEqual(result, "9000 and production_db") + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/cli/tests/utils/test_lib.py b/cli/tests/utils/test_lib.py new file mode 100644 index 00000000..5241d852 --- /dev/null +++ b/cli/tests/utils/test_lib.py @@ -0,0 +1,533 @@ +import os +import platform +import shutil +import stat +import tempfile +import unittest +from unittest.mock import Mock, patch, mock_open +import requests + +from app.utils.lib import ( + SupportedOS, + SupportedDistribution, + SupportedPackageManager, + Supported, + HostInformation, + ParallelProcessor, + DirectoryManager, + FileManager, +) +from app.utils.message import ( + FAILED_TO_GET_PUBLIC_IP_MESSAGE, + FAILED_TO_REMOVE_DIRECTORY_MESSAGE, + REMOVED_DIRECTORY_MESSAGE, +) + + +class TestSupportedOS(unittest.TestCase): + def test_supported_os_values(self): + self.assertEqual(SupportedOS.LINUX.value, "linux") + self.assertEqual(SupportedOS.MACOS.value, "darwin") + + +class TestSupportedDistribution(unittest.TestCase): + def test_supported_distribution_values(self): + self.assertEqual(SupportedDistribution.DEBIAN.value, "debian") + self.assertEqual(SupportedDistribution.UBUNTU.value, "ubuntu") + 
self.assertEqual(SupportedDistribution.CENTOS.value, "centos") + self.assertEqual(SupportedDistribution.FEDORA.value, "fedora") + self.assertEqual(SupportedDistribution.ALPINE.value, "alpine") + + +class TestSupportedPackageManager(unittest.TestCase): + def test_supported_package_manager_values(self): + self.assertEqual(SupportedPackageManager.APT.value, "apt") + self.assertEqual(SupportedPackageManager.YUM.value, "yum") + self.assertEqual(SupportedPackageManager.DNF.value, "dnf") + self.assertEqual(SupportedPackageManager.PACMAN.value, "pacman") + self.assertEqual(SupportedPackageManager.APK.value, "apk") + self.assertEqual(SupportedPackageManager.BREW.value, "brew") + + +class TestSupported(unittest.TestCase): + def test_os_supported(self): + self.assertTrue(Supported.os("linux")) + self.assertTrue(Supported.os("darwin")) + + def test_os_not_supported(self): + self.assertFalse(Supported.os("windows")) + self.assertFalse(Supported.os("freebsd")) + self.assertFalse(Supported.os("")) + + def test_os_case_sensitive(self): + self.assertFalse(Supported.os("Linux")) + self.assertFalse(Supported.os("DARWIN")) + + def test_distribution_supported(self): + self.assertTrue(Supported.distribution("debian")) + self.assertTrue(Supported.distribution("ubuntu")) + self.assertTrue(Supported.distribution("centos")) + + def test_distribution_not_supported(self): + self.assertFalse(Supported.distribution("arch")) + self.assertFalse(Supported.distribution("gentoo")) + self.assertFalse(Supported.distribution("")) + + def test_package_manager_supported(self): + self.assertTrue(Supported.package_manager("apt")) + self.assertTrue(Supported.package_manager("yum")) + self.assertTrue(Supported.package_manager("brew")) + + def test_package_manager_not_supported(self): + self.assertFalse(Supported.package_manager("pip")) + self.assertFalse(Supported.package_manager("npm")) + self.assertFalse(Supported.package_manager("")) + + def test_get_os(self): + os_list = Supported.get_os() + self.assertIsInstance(os_list, list) + self.assertIn("linux", os_list) + self.assertIn("darwin", os_list) + self.assertEqual(len(os_list), 2) + + def test_get_distributions(self): + dist_list = Supported.get_distributions() + self.assertIsInstance(dist_list, list) + self.assertIn("debian", dist_list) + self.assertIn("ubuntu", dist_list) + self.assertIn("centos", dist_list) + self.assertIn("fedora", dist_list) + self.assertIn("alpine", dist_list) + self.assertEqual(len(dist_list), 5) + + +class TestHostInformation(unittest.TestCase): + @patch("platform.system") + def test_get_os_name(self, mock_system): + mock_system.return_value = "Linux" + self.assertEqual(HostInformation.get_os_name(), "linux") + + mock_system.return_value = "Darwin" + self.assertEqual(HostInformation.get_os_name(), "darwin") + + mock_system.return_value = "Windows" + self.assertEqual(HostInformation.get_os_name(), "windows") + + @patch("app.utils.lib.HostInformation.get_os_name") + @patch("app.utils.lib.HostInformation.command_exists") + def test_get_package_manager_macos(self, mock_command_exists, mock_get_os_name): + mock_get_os_name.return_value = "darwin" + mock_command_exists.return_value = True + + result = HostInformation.get_package_manager() + self.assertEqual(result, "brew") + + @patch("app.utils.lib.HostInformation.get_os_name") + @patch("app.utils.lib.HostInformation.command_exists") + def test_get_package_manager_linux_apt(self, mock_command_exists, mock_get_os_name): + mock_get_os_name.return_value = "linux" + + def command_exists_side_effect(command): + 
return command == "apt" + + mock_command_exists.side_effect = command_exists_side_effect + + result = HostInformation.get_package_manager() + self.assertEqual(result, "apt") + + @patch("app.utils.lib.HostInformation.get_os_name") + @patch("app.utils.lib.HostInformation.command_exists") + def test_get_package_manager_linux_yum(self, mock_command_exists, mock_get_os_name): + mock_get_os_name.return_value = "linux" + + def command_exists_side_effect(command): + return command == "yum" + + mock_command_exists.side_effect = command_exists_side_effect + + result = HostInformation.get_package_manager() + self.assertEqual(result, "yum") + + @patch("app.utils.lib.HostInformation.get_os_name") + @patch("app.utils.lib.HostInformation.command_exists") + def test_get_package_manager_no_supported_manager(self, mock_command_exists, mock_get_os_name): + mock_get_os_name.return_value = "linux" + mock_command_exists.return_value = False + + with self.assertRaises(RuntimeError) as context: + HostInformation.get_package_manager() + + self.assertIn("No supported package manager found", str(context.exception)) + + @patch("shutil.which") + def test_command_exists_true(self, mock_which): + mock_which.return_value = "/usr/bin/apt" + self.assertTrue(HostInformation.command_exists("apt")) + + @patch("shutil.which") + def test_command_exists_false(self, mock_which): + mock_which.return_value = None + self.assertFalse(HostInformation.command_exists("nonexistent")) + + @patch("requests.get") + def test_get_public_ip_success(self, mock_get): + mock_response = Mock() + mock_response.text = "192.168.1.1" + mock_response.raise_for_status.return_value = None + mock_get.return_value = mock_response + + result = HostInformation.get_public_ip() + self.assertEqual(result, "192.168.1.1") + mock_get.assert_called_once_with("https://api.ipify.org", timeout=10) + + @patch("requests.get") + def test_get_public_ip_http_error(self, mock_get): + mock_get.side_effect = requests.HTTPError("404 Not Found") + + with self.assertRaises(Exception) as context: + HostInformation.get_public_ip() + + self.assertEqual(str(context.exception), FAILED_TO_GET_PUBLIC_IP_MESSAGE) + + @patch("requests.get") + def test_get_public_ip_connection_error(self, mock_get): + mock_get.side_effect = requests.ConnectionError("Connection failed") + + with self.assertRaises(Exception) as context: + HostInformation.get_public_ip() + + self.assertEqual(str(context.exception), FAILED_TO_GET_PUBLIC_IP_MESSAGE) + + @patch("requests.get") + def test_get_public_ip_timeout(self, mock_get): + mock_get.side_effect = requests.Timeout("Request timeout") + + with self.assertRaises(Exception) as context: + HostInformation.get_public_ip() + + self.assertEqual(str(context.exception), FAILED_TO_GET_PUBLIC_IP_MESSAGE) + + +class TestParallelProcessor(unittest.TestCase): + def test_process_items_empty_list(self): + def processor(x): + return x * 2 + + results = ParallelProcessor.process_items([], processor) + self.assertEqual(results, []) + + def test_process_items_single_item(self): + def processor(x): + return x * 2 + + results = ParallelProcessor.process_items([5], processor) + self.assertEqual(results, [10]) + + def test_process_items_multiple_items(self): + def processor(x): + return x * 2 + + results = ParallelProcessor.process_items([1, 2, 3, 4, 5], processor) + self.assertEqual(len(results), 5) + self.assertEqual(set(results), {2, 4, 6, 8, 10}) + + def test_process_items_with_error_handler(self): + def processor(x): + if x == 3: + raise ValueError("Test error") + return x * 2 + 
+ def error_handler(item, error): + return f"Error processing {item}: {str(error)}" + + results = ParallelProcessor.process_items([1, 2, 3, 4, 5], processor, error_handler=error_handler) + self.assertEqual(len(results), 5) + + error_results = [r for r in results if "Error processing 3" in str(r)] + normal_results = [r for r in results if isinstance(r, int)] + + self.assertEqual(len(error_results), 1) + self.assertEqual(set(normal_results), {2, 4, 8, 10}) + + def test_process_items_without_error_handler(self): + def processor(x): + if x == 3: + raise ValueError("Test error") + return x * 2 + + results = ParallelProcessor.process_items([1, 2, 3, 4, 5], processor) + self.assertEqual(len(results), 4) + self.assertEqual(set(results), {2, 4, 8, 10}) + + def test_process_items_max_workers_limit(self): + def processor(x): + return x * 2 + + results = ParallelProcessor.process_items([1, 2, 3, 4, 5], processor, max_workers=2) + self.assertEqual(len(results), 5) + self.assertEqual(set(results), {2, 4, 6, 8, 10}) + + def test_process_items_max_workers_exceeds_items(self): + def processor(x): + return x * 2 + + results = ParallelProcessor.process_items([1, 2], processor, max_workers=10) + self.assertEqual(len(results), 2) + self.assertEqual(set(results), {2, 4}) + + +class TestDirectoryManager(unittest.TestCase): + @patch("os.path.exists") + def test_path_exists_true(self, mock_exists): + mock_exists.return_value = True + self.assertTrue(DirectoryManager.path_exists("/test/path")) + + @patch("os.path.exists") + def test_path_exists_false(self, mock_exists): + mock_exists.return_value = False + self.assertFalse(DirectoryManager.path_exists("/test/path")) + + @patch("os.path.exists") + def test_path_exists_and_not_force_true(self, mock_exists): + mock_exists.return_value = True + self.assertTrue(DirectoryManager.path_exists_and_not_force("/test/path", False)) + + @patch("os.path.exists") + def test_path_exists_and_not_force_false_when_force(self, mock_exists): + mock_exists.return_value = True + self.assertFalse(DirectoryManager.path_exists_and_not_force("/test/path", True)) + + @patch("os.path.exists") + def test_path_exists_and_not_force_false_when_not_exists(self, mock_exists): + mock_exists.return_value = False + self.assertFalse(DirectoryManager.path_exists_and_not_force("/test/path", False)) + + @patch("shutil.rmtree") + @patch("os.path.exists") + @patch("os.path.isdir") + def test_remove_directory_success(self, mock_isdir, mock_exists, mock_rmtree): + mock_exists.return_value = True + mock_isdir.return_value = True + mock_logger = Mock() + + result = DirectoryManager.remove_directory("/test/path", mock_logger) + + self.assertTrue(result) + mock_rmtree.assert_called_once_with("/test/path") + mock_logger.debug.assert_called() + + @patch("shutil.rmtree") + @patch("os.path.exists") + def test_remove_directory_success_no_logger(self, mock_exists, mock_rmtree): + mock_exists.return_value = True + + result = DirectoryManager.remove_directory("/test/path") + + self.assertTrue(result) + mock_rmtree.assert_called_once_with("/test/path") + + @patch("shutil.rmtree") + @patch("os.path.exists") + def test_remove_directory_failure(self, mock_exists, mock_rmtree): + mock_exists.return_value = True + mock_rmtree.side_effect = PermissionError("Permission denied") + mock_logger = Mock() + + result = DirectoryManager.remove_directory("/test/path", mock_logger) + + self.assertFalse(result) + mock_logger.debug.assert_called() + mock_logger.error.assert_called_once() + + @patch("shutil.rmtree") + 
@patch("os.path.exists") + def test_remove_directory_failure_no_logger(self, mock_exists, mock_rmtree): + mock_exists.return_value = True + mock_rmtree.side_effect = OSError("Directory not found") + + result = DirectoryManager.remove_directory("/test/path") + + self.assertFalse(result) + + +class TestFileManager(unittest.TestCase): + def setUp(self): + self.temp_dir = tempfile.mkdtemp() + self.test_file = os.path.join(self.temp_dir, "test.txt") + + def tearDown(self): + shutil.rmtree(self.temp_dir, ignore_errors=True) + + @patch("os.chmod") + def test_set_permissions_success(self, mock_chmod): + mock_logger = Mock() + + with open(self.test_file, "w") as f: + f.write("test content") + + success, error = FileManager.set_permissions(self.test_file, 0o644, mock_logger) + + self.assertTrue(success) + self.assertIsNone(error) + mock_chmod.assert_called_once_with(self.test_file, 0o644) + mock_logger.debug.assert_called() + + @patch("os.chmod") + def test_set_permissions_failure(self, mock_chmod): + mock_chmod.side_effect = PermissionError("Permission denied") + mock_logger = Mock() + + success, error = FileManager.set_permissions(self.test_file, 0o644, mock_logger) + + self.assertFalse(success) + self.assertIn("Failed to set permissions", error) + mock_logger.error.assert_called_once() + + @patch("os.chmod") + def test_set_permissions_success_no_logger(self, mock_chmod): + with open(self.test_file, "w") as f: + f.write("test content") + + success, error = FileManager.set_permissions(self.test_file, 0o644) + + self.assertTrue(success) + self.assertIsNone(error) + mock_chmod.assert_called_once_with(self.test_file, 0o644) + + @patch("os.makedirs") + def test_create_directory_success_new(self, mock_makedirs): + mock_logger = Mock() + test_dir = os.path.join(self.temp_dir, "new_dir") + + success, error = FileManager.create_directory(test_dir, 0o755, mock_logger) + + self.assertTrue(success) + self.assertIsNone(error) + mock_makedirs.assert_called_once_with(test_dir, mode=0o755) + mock_logger.debug.assert_called_once() + + @patch("os.makedirs") + @patch("os.path.exists") + def test_create_directory_success_exists(self, mock_exists, mock_makedirs): + mock_logger = Mock() + test_dir = os.path.join(self.temp_dir, "existing_dir") + mock_exists.return_value = True + + success, error = FileManager.create_directory(test_dir, 0o755, mock_logger) + + self.assertTrue(success) + self.assertIsNone(error) + mock_makedirs.assert_not_called() + + @patch("os.makedirs") + def test_create_directory_failure(self, mock_makedirs): + mock_makedirs.side_effect = PermissionError("Permission denied") + mock_logger = Mock() + test_dir = "/root/restricted_dir" + + success, error = FileManager.create_directory(test_dir, 0o755, mock_logger) + + self.assertFalse(success) + self.assertIn("Failed to create directory", error) + mock_logger.error.assert_called_once() + + def test_append_to_file_success(self): + mock_logger = Mock() + content = "new content" + + success, error = FileManager.append_to_file(self.test_file, content, 0o644, mock_logger) + + self.assertTrue(success) + self.assertIsNone(error) + + with open(self.test_file, "r") as f: + file_content = f.read() + + self.assertIn(content, file_content) + mock_logger.debug.assert_called() + + def test_append_to_file_failure_permission(self): + mock_logger = Mock() + content = "new content" + + with patch("builtins.open", side_effect=PermissionError("Permission denied")): + success, error = FileManager.append_to_file(self.test_file, content, 0o644, mock_logger) + + 
self.assertFalse(success) + self.assertIn("Failed to append to", error) + mock_logger.error.assert_called_once() + + def test_read_file_content_success(self): + content = "test content" + with open(self.test_file, "w") as f: + f.write(content) + + success, file_content, error = FileManager.read_file_content(self.test_file) + + self.assertTrue(success) + self.assertEqual(file_content, content) + self.assertIsNone(error) + + def test_read_file_content_failure(self): + mock_logger = Mock() + + with patch("builtins.open", side_effect=FileNotFoundError("File not found")): + success, file_content, error = FileManager.read_file_content(self.test_file, mock_logger) + + self.assertFalse(success) + self.assertIsNone(file_content) + self.assertIn("Failed to read", error) + mock_logger.error.assert_called_once() + + def test_read_file_content_strips_whitespace(self): + content = " test content \n" + with open(self.test_file, "w") as f: + f.write(content) + + success, file_content, error = FileManager.read_file_content(self.test_file) + + self.assertTrue(success) + self.assertEqual(file_content, "test content") + self.assertIsNone(error) + + @patch("os.path.expanduser") + def test_expand_user_path(self, mock_expanduser): + mock_expanduser.return_value = "/home/user/test" + + result = FileManager.expand_user_path("~/test") + + self.assertEqual(result, "/home/user/test") + mock_expanduser.assert_called_once_with("~/test") + + @patch("os.path.dirname") + def test_get_directory_path(self, mock_dirname): + mock_dirname.return_value = "/path/to" + + result = FileManager.get_directory_path("/path/to/file.txt") + + self.assertEqual(result, "/path/to") + mock_dirname.assert_called_once_with("/path/to/file.txt") + + def test_get_public_key_path(self): + private_key_path = "/path/to/id_rsa" + expected_public_key_path = "/path/to/id_rsa.pub" + + result = FileManager.get_public_key_path(private_key_path) + + self.assertEqual(result, expected_public_key_path) + + def test_get_public_key_path_empty_string(self): + result = FileManager.get_public_key_path("") + self.assertEqual(result, ".pub") + + def test_get_public_key_path_with_spaces(self): + private_key_path = "/path with spaces/id_rsa" + expected_public_key_path = "/path with spaces/id_rsa.pub" + + result = FileManager.get_public_key_path(private_key_path) + + self.assertEqual(result, expected_public_key_path) + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/cli/tests/utils/test_processor.py b/cli/tests/utils/test_processor.py new file mode 100644 index 00000000..3545c1e6 --- /dev/null +++ b/cli/tests/utils/test_processor.py @@ -0,0 +1,85 @@ +import time +import unittest + +from app.utils.lib import ParallelProcessor + + +class TestParallelProcessor(unittest.TestCase): + + def test_basic_processing(self): + """Test basic parallel processing functionality""" + + def square(x): + return x * x + + items = [1, 2, 3, 4, 5] + results = ParallelProcessor.process_items(items, square) + + # Results are in completion order, not input order + self.assertEqual(len(results), 5) + self.assertEqual(set(results), {1, 4, 9, 16, 25}) + + def test_error_handling(self): + """Test error handling in parallel processing""" + + def process_with_error(x): + if x == 3: + raise ValueError("Test error") + return x * 2 + + def error_handler(item, error): + return f"Error processing {item}: {str(error)}" + + items = [1, 2, 3, 4, 5] + results = ParallelProcessor.process_items(items, process_with_error, error_handler=error_handler) + + 
self.assertEqual(len(results), 5) + # Check that we have the expected results (order may vary) + expected_results = {2, 4, 8, 10} # 1*2, 2*2, 4*2, 5*2 + error_results = [r for r in results if "Error processing 3" in str(r)] + normal_results = [r for r in results if isinstance(r, int)] + + self.assertEqual(len(error_results), 1) + self.assertEqual(set(normal_results), expected_results) + + def test_timeout_behavior(self): + """Test that processing respects timeout behavior""" + + def slow_process(x): + time.sleep(0.1) + return x * 2 + + items = list(range(10)) + start_time = time.time() + results = ParallelProcessor.process_items(items, slow_process, max_workers=5) + end_time = time.time() + + self.assertEqual(len(results), 10) + # Results are in completion order, not input order + self.assertEqual(set(results), {0, 2, 4, 6, 8, 10, 12, 14, 16, 18}) + + # With 5 workers and 10 items taking 0.1s each, should complete in ~0.2s + # (2 batches of 5 items each) + self.assertLess(end_time - start_time, 0.5) + + def test_empty_list(self): + """Test processing empty list""" + + def process(x): + return x * 2 + + results = ParallelProcessor.process_items([], process) + self.assertEqual(results, []) + + def test_single_item(self): + """Test processing single item""" + + def process(x): + return x * 2 + + results = ParallelProcessor.process_items([5], process) + self.assertEqual(results, [10]) + + +if __name__ == "__main__": + unittest.main() diff --git a/cli/tests/utils/test_timeout.py b/cli/tests/utils/test_timeout.py new file mode 100644 index 00000000..2744bba8 --- /dev/null +++ b/cli/tests/utils/test_timeout.py @@ -0,0 +1,192 @@ +import signal +import time +import unittest +from unittest.mock import Mock, patch + +from app.utils.timeout import TimeoutWrapper +from app.commands.install.messages import timeout_error + + +class TestTimeoutWrapper(unittest.TestCase): + def setUp(self): + self.original_signal = signal.signal + self.original_alarm = signal.alarm + + def tearDown(self): + signal.signal = self.original_signal + signal.alarm = self.original_alarm + + def test_timeout_wrapper_zero_timeout(self): + with TimeoutWrapper(0) as wrapper: + self.assertEqual(wrapper.timeout, 0) + time.sleep(0.1) + + def test_timeout_wrapper_negative_timeout(self): + with TimeoutWrapper(-1) as wrapper: + self.assertEqual(wrapper.timeout, -1) + time.sleep(0.1) + + def test_timeout_wrapper_positive_timeout_success(self): + with TimeoutWrapper(5) as wrapper: + self.assertEqual(wrapper.timeout, 5) + time.sleep(0.1) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_setup(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + with TimeoutWrapper(10) as wrapper: + mock_signal.assert_called_once_with(signal.SIGALRM, unittest.mock.ANY) + mock_alarm.assert_called_once_with(10) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_cleanup(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + with TimeoutWrapper(10): + pass + + mock_alarm.assert_has_calls([ + unittest.mock.call(10), + unittest.mock.call(0) + ]) + self.assertEqual(mock_signal.call_count, 2) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_zero_timeout_no_signal_setup(self, mock_alarm, mock_signal): + with TimeoutWrapper(0): + pass + + mock_signal.assert_not_called() + mock_alarm.assert_not_called() + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_negative_timeout_no_signal_setup(self, mock_alarm, 
mock_signal): + with TimeoutWrapper(-5): + pass + + mock_signal.assert_not_called() + mock_alarm.assert_not_called() + + def test_timeout_wrapper_timeout_triggered(self): + with self.assertRaises(TimeoutError) as context: + with TimeoutWrapper(1): + time.sleep(2) + + expected_message = timeout_error.format(timeout=1) + self.assertEqual(str(context.exception), expected_message) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_exception_handling(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + with self.assertRaises(ValueError): + with TimeoutWrapper(10): + raise ValueError("Test exception") + + mock_alarm.assert_has_calls([ + unittest.mock.call(10), + unittest.mock.call(0) + ]) + mock_signal.assert_has_calls([ + unittest.mock.call(signal.SIGALRM, unittest.mock.ANY), + unittest.mock.call(signal.SIGALRM, None) + ]) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_handler_registration(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + with TimeoutWrapper(5) as wrapper: + mock_signal.assert_called_once() + call_args = mock_signal.call_args + self.assertEqual(call_args[0][0], signal.SIGALRM) + self.assertTrue(callable(call_args[0][1])) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_handler_raises_timeout(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + with TimeoutWrapper(5) as wrapper: + signal_handler = mock_signal.call_args[0][1] + + with self.assertRaises(TimeoutError) as context: + signal_handler(signal.SIGALRM, None) + + expected_message = timeout_error.format(timeout=5) + self.assertEqual(str(context.exception), expected_message) + + def test_timeout_wrapper_nested_usage(self): + with TimeoutWrapper(10) as outer: + with TimeoutWrapper(5) as inner: + self.assertEqual(outer.timeout, 10) + self.assertEqual(inner.timeout, 5) + time.sleep(0.1) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_multiple_instances(self, mock_alarm, mock_signal): + mock_signal.return_value = None + + wrapper1 = TimeoutWrapper(10) + wrapper2 = TimeoutWrapper(5) + + with wrapper1: + with wrapper2: + pass + + self.assertEqual(mock_alarm.call_count, 4) + + def test_timeout_wrapper_return_value(self): + with TimeoutWrapper(10) as wrapper: + self.assertIsInstance(wrapper, TimeoutWrapper) + self.assertEqual(wrapper.timeout, 10) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_restoration(self, mock_alarm, mock_signal): + original_handler = Mock() + mock_signal.return_value = original_handler + + with TimeoutWrapper(10): + pass + + mock_signal.assert_has_calls([ + unittest.mock.call(signal.SIGALRM, unittest.mock.ANY), + unittest.mock.call(signal.SIGALRM, original_handler) + ]) + + def test_timeout_wrapper_large_timeout_value(self): + with TimeoutWrapper(999999) as wrapper: + self.assertEqual(wrapper.timeout, 999999) + time.sleep(0.1) + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_signal_error_handling(self, mock_alarm, mock_signal): + mock_signal.side_effect = OSError("Signal not supported") + + with self.assertRaises(OSError): + with TimeoutWrapper(10): + pass + + @patch('signal.signal') + @patch('signal.alarm') + def test_timeout_wrapper_alarm_error_handling(self, mock_alarm, mock_signal): + mock_signal.return_value = None + mock_alarm.side_effect = OSError("Alarm not supported") + + with self.assertRaises(OSError): + with TimeoutWrapper(10): + pass + + 
+if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 104e70f4..0e927ff1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,8 +13,8 @@ services: - HOST_NAME=nixopus-db volumes: - ./logs:/app/logs - - ${DOCKER_CERT_PATH}:/etc/nixopus/docker-certs - - ${SSH_PRIVATE_KEY}:/etc/nixopus/ssh/id_rsa + - ${DOCKER_CERT_PATH:-/etc/nixopus/docker-certs}:/etc/nixopus/docker-certs + - ${SSH_PRIVATE_KEY:-/etc/nixopus/ssh/id_rsa}:/etc/nixopus/ssh/id_rsa - ${MOUNT_PATH:-/etc/nixopus/configs}:/etc/nixopus/configs - /var/run/docker.sock:/var/run/docker.sock - /etc/nixopus/source/api/.env:/app/.env @@ -100,7 +100,7 @@ services: - "80:80" - "443:443" volumes: - - /etc/nixopus/caddy/Caddyfile:/etc/caddy/Caddyfile + - /etc/nixopus/source/helpers/Caddyfile:/etc/caddy/Caddyfile - ${CADDY_DATA_VOLUME:-/etc/nixopus/caddy}:/data - ${CADDY_CONFIG_VOLUME:-/etc/nixopus/caddy}:/config command: diff --git a/docs/.vitepress/config.mts b/docs/.vitepress/config.mts index d0f80163..0bce3027 100644 --- a/docs/.vitepress/config.mts +++ b/docs/.vitepress/config.mts @@ -50,9 +50,9 @@ export default withMermaid( text: "Edit this page on Github" }, nav: [ - { text: 'Home', link: '/' }, { text: 'Get Started', link: '/install/index.md' }, - { text: 'Blog', link: '/blog/' } + { text: "CLI", link: '/cli/index.md' }, + { text: 'Blog', link: '/blog/' }, ], footer: { message: `Made with love
Released under the Functional Source License (FSL)`, @@ -77,6 +77,31 @@ export default withMermaid( { text: "Notifications", link: "/notifications/index.md" } ] }, + { + text: 'CLI', + items: [ + { text: 'Overview', link: '/cli/index.md' }, + { text: 'Installation', link: '/cli/installation.md' }, + { text: 'Configuration', link: '/cli/config.md' }, + { + text: 'Commands', + collapsed: true, + items: [ + { text: 'preflight', link: '/cli/commands/preflight.md' }, + { text: 'install', link: '/cli/commands/install.md' }, + { text: 'uninstall', link: '/cli/commands/uninstall.md' }, + { text: 'service', link: '/cli/commands/service.md' }, + { text: 'conf', link: '/cli/commands/conf.md' }, + { text: 'proxy', link: '/cli/commands/proxy.md' }, + { text: 'clone', link: '/cli/commands/clone.md' }, + { text: 'version', link: '/cli/commands/version.md' }, + { text: 'test', link: '/cli/commands/test.md' } + ] + }, + { text: 'Reference', link: '/cli/cli-reference.md' }, + { text: 'Development', link: '/cli/development.md' } + ] + }, { text: 'Blog', items: [ diff --git a/docs/cli/cli-reference.md b/docs/cli/cli-reference.md new file mode 100644 index 00000000..a32a0a19 --- /dev/null +++ b/docs/cli/cli-reference.md @@ -0,0 +1,520 @@ +# `nixopus` + +Nixopus CLI - A powerful deployment and management tool + +**Usage**: + +```console +$ nixopus [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `-v, --version`: Show version information +* `--help`: Show this message and exit. + +**Commands**: + +* `preflight`: Preflight checks for system compatibility +* `clone`: Clone a repository +* `conf`: Manage configuration +* `service`: Manage Nixopus services +* `proxy`: Manage Nixopus proxy (Caddy) configuration +* `install`: Install Nixopus +* `uninstall`: Uninstall Nixopus +* `version`: Show version information +* `test`: Run tests (only in DEVELOPMENT environment) + +## `nixopus preflight` + +Preflight checks for system compatibility + +**Usage**: + +```console +$ nixopus preflight [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--help`: Show this message and exit. + +**Commands**: + +* `check`: Run all preflight checks +* `ports`: Check if list of ports are available on a... +* `deps`: Check if list of dependencies are... + +### `nixopus preflight check` + +Run all preflight checks + +**Usage**: + +```console +$ nixopus preflight check [OPTIONS] +``` + +**Options**: + +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text,json [default: text] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus preflight ports` + +Check if list of ports are available on a host + +**Usage**: + +```console +$ nixopus preflight ports [OPTIONS] PORTS... +``` + +**Arguments**: + +* `PORTS...`: The list of ports to check [required] + +**Options**: + +* `-h, --host TEXT`: The host to check [default: localhost] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus preflight deps` + +Check if list of dependencies are available on the system + +**Usage**: + +```console +$ nixopus preflight deps [OPTIONS] DEPS... 
+``` + +**Arguments**: + +* `DEPS...`: The list of dependencies to check [required] + +**Options**: + +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus clone` + +Clone a repository + +**Usage**: + +```console +$ nixopus clone [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `-r, --repo TEXT`: The repository to clone [default: https://github.com/raghavyuva/nixopus] +* `-b, --branch TEXT`: The branch to clone [default: master] +* `-p, --path TEXT`: The path to clone the repository to [default: /etc/nixopus/source] +* `-f, --force`: Force the clone +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus conf` + +Manage configuration + +**Usage**: + +```console +$ nixopus conf [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--help`: Show this message and exit. + +**Commands**: + +* `list`: List all configuration +* `delete`: Delete a configuration +* `set`: Set a configuration + +### `nixopus conf list` + +List all configuration + +**Usage**: + +```console +$ nixopus conf list [OPTIONS] +``` + +**Options**: + +* `-s, --service TEXT`: The name of the service to list configuration for, e.g api,view [default: api] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus conf delete` + +Delete a configuration + +**Usage**: + +```console +$ nixopus conf delete [OPTIONS] KEY +``` + +**Arguments**: + +* `KEY`: The key of the configuration to delete [required] + +**Options**: + +* `-s, --service TEXT`: The name of the service to delete configuration for, e.g api,view [default: api] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus conf set` + +Set a configuration + +**Usage**: + +```console +$ nixopus conf set [OPTIONS] KEY_VALUE +``` + +**Arguments**: + +* `KEY_VALUE`: Configuration in the form KEY=VALUE [required] + +**Options**: + +* `-s, --service TEXT`: The name of the service to set configuration for, e.g api,view [default: api] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus service` + +Manage Nixopus services + +**Usage**: + +```console +$ nixopus service [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--help`: Show this message and exit. 
+ +**Commands**: + +* `up`: Start Nixopus services +* `down`: Stop Nixopus services +* `ps`: Show status of Nixopus services +* `restart`: Restart Nixopus services + +### `nixopus service up` + +Start Nixopus services + +**Usage**: + +```console +$ nixopus service up [OPTIONS] +``` + +**Options**: + +* `-n, --name TEXT`: The name of the service to start, defaults to all [default: all] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `--dry-run`: Dry run +* `-d, --detach`: Detach from the service and run in the background +* `-e, --env-file TEXT`: Path to the environment file +* `-f, --compose-file TEXT`: Path to the compose file [default: /etc/nixopus/source/docker-compose.yml] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus service down` + +Stop Nixopus services + +**Usage**: + +```console +$ nixopus service down [OPTIONS] +``` + +**Options**: + +* `-n, --name TEXT`: The name of the service to stop, defaults to all [default: all] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `--dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-f, --compose-file TEXT`: Path to the compose file [default: /etc/nixopus/source/docker-compose.yml] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus service ps` + +Show status of Nixopus services + +**Usage**: + +```console +$ nixopus service ps [OPTIONS] +``` + +**Options**: + +* `-n, --name TEXT`: The name of the service to show, defaults to all [default: all] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-f, --compose-file TEXT`: Path to the compose file [default: /etc/nixopus/source/docker-compose.yml] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus service restart` + +Restart Nixopus services + +**Usage**: + +```console +$ nixopus service restart [OPTIONS] +``` + +**Options**: + +* `-n, --name TEXT`: The name of the service to restart, defaults to all [default: all] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-e, --env-file TEXT`: Path to the environment file +* `-f, --compose-file TEXT`: Path to the compose file [default: /etc/nixopus/source/docker-compose.yml] +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus proxy` + +Manage Nixopus proxy (Caddy) configuration + +**Usage**: + +```console +$ nixopus proxy [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--help`: Show this message and exit. + +**Commands**: + +* `load`: Load Caddy proxy configuration +* `status`: Check Caddy proxy status +* `stop`: Stop Caddy proxy + +### `nixopus proxy load` + +Load Caddy proxy configuration + +**Usage**: + +```console +$ nixopus proxy load [OPTIONS] +``` + +**Options**: + +* `-p, --proxy-port INTEGER`: Caddy admin port [default: 2019] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format: text, json [default: text] +* `--dry-run`: Dry run +* `-c, --config-file TEXT`: Path to Caddy config file +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. 
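+ 
+ The invocation below is illustrative only; the Caddyfile path is an assumption taken from the repository layout and should be replaced with your actual configuration file:
+ 
+ ```console
+ $ nixopus proxy load --config-file /etc/nixopus/source/helpers/Caddyfile
+ ```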
+ +### `nixopus proxy status` + +Check Caddy proxy status + +**Usage**: + +```console +$ nixopus proxy status [OPTIONS] +``` + +**Options**: + +* `-p, --proxy-port INTEGER`: Caddy admin port [default: 2019] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format: text, json [default: text] +* `--dry-run`: Dry run +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus proxy stop` + +Stop Caddy proxy + +**Usage**: + +```console +$ nixopus proxy stop [OPTIONS] +``` + +**Options**: + +* `-p, --proxy-port INTEGER`: Caddy admin port [default: 2019] +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format: text, json [default: text] +* `--dry-run`: Dry run +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus install` + +Install Nixopus + +**Usage**: + +```console +$ nixopus install [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `-v, --verbose`: Show more details while installing +* `-t, --timeout INTEGER`: How long to wait for each step (in seconds) [default: 300] +* `-f, --force`: Replace files if they already exist +* `-d, --dry-run`: See what would happen, but don't make changes +* `-c, --config-file TEXT`: Path to custom config file (defaults to built-in config) +* `-ad, --api-domain TEXT`: The domain where the nixopus api will be accessible (e.g. api.nixopus.com), if not provided you can use the ip address of the server and the port (e.g. 192.168.1.100:8443) +* `-vd, --view-domain TEXT`: The domain where the nixopus view will be accessible (e.g. nixopus.com), if not provided you can use the ip address of the server and the port (e.g. 192.168.1.100:80) +* `--help`: Show this message and exit. + +**Commands**: + +* `ssh`: Generate an SSH key pair with proper... +* `deps`: Install dependencies + +### `nixopus install ssh` + +Generate an SSH key pair with proper permissions and optional authorized_keys integration + +**Usage**: + +```console +$ nixopus install ssh [OPTIONS] +``` + +**Options**: + +* `-p, --path TEXT`: The SSH key path to generate [default: ~/.ssh/nixopus_ed25519] +* `-t, --key-type TEXT`: The SSH key type (rsa, ed25519, ecdsa) [default: ed25519] +* `-s, --key-size INTEGER`: The SSH key size [default: 4096] +* `-P, --passphrase TEXT`: The passphrase to use for the SSH key +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-f, --force`: Force overwrite existing SSH key +* `-S, --set-permissions`: Set proper file permissions [default: True] +* `-a, --add-to-authorized-keys`: Add public key to authorized_keys +* `-c, --create-ssh-directory`: Create .ssh directory if it doesn't exist [default: True] +* `-T, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +### `nixopus install deps` + +Install dependencies + +**Usage**: + +```console +$ nixopus install deps [OPTIONS] +``` + +**Options**: + +* `-v, --verbose`: Verbose output +* `-o, --output TEXT`: Output format, text, json [default: text] +* `-d, --dry-run`: Dry run +* `-t, --timeout INTEGER`: Timeout in seconds [default: 10] +* `--help`: Show this message and exit. + +## `nixopus uninstall` + +Uninstall Nixopus + +**Usage**: + +```console +$ nixopus uninstall [OPTIONS] COMMAND [ARGS]... 
+``` + +**Options**: + +* `-v, --verbose`: Show more details while uninstalling +* `-t, --timeout INTEGER`: How long to wait for each step (in seconds) [default: 300] +* `-d, --dry-run`: See what would happen, but don't make changes +* `-f, --force`: Remove files without confirmation prompts +* `--help`: Show this message and exit. + +## `nixopus version` + +Show version information + +**Usage**: + +```console +$ nixopus version [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--help`: Show this message and exit. + +## `nixopus test` + +Run tests (only in DEVELOPMENT environment) + +**Usage**: + +```console +$ nixopus test [OPTIONS] [TARGET] COMMAND [ARGS]... +``` + +**Arguments**: + +* `[TARGET]`: Test target (e.g., version) + +**Options**: + +* `--help`: Show this message and exit. diff --git a/docs/cli/commands/clone.md b/docs/cli/commands/clone.md new file mode 100644 index 00000000..cbe33f62 --- /dev/null +++ b/docs/cli/commands/clone.md @@ -0,0 +1,154 @@ +# clone - Repository Cloning + +The `clone` command clones the Nixopus repository with basic configuration options. By default, it clones the main Nixopus repository to a configured local path. + +## Quick Start +```bash +# Clone with default settings (from config) +nixopus clone + +# Clone specific branch +nixopus clone --branch develop + +# Clone from custom repository +nixopus clone --repo https://github.com/yourfork/nixopus.git + +# Preview clone operation +nixopus clone --dry-run +``` + +## Overview + +The clone command provides basic Git repository cloning functionality with configuration-driven defaults for the Nixopus repository. + +## Command Syntax + +```bash +nixopus clone [OPTIONS] +``` + +## Options + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--repo` | `-r` | Repository URL to clone | `https://github.com/raghavyuva/nixopus` | +| `--branch` | `-b` | Branch to clone | `master` | +| `--path` | `-p` | Local path for cloning | `/etc/nixopus/source` | +| `--force` | `-f` | Force clone (overwrite existing directory) | `false` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format | `text` | +| `--dry-run` | `-d` | Preview clone operation without executing | `false` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Basic clone with default configuration +nixopus clone + +# Clone specific branch +nixopus clone --branch develop + +# Clone from custom repository +nixopus clone --repo https://github.com/yourfork/nixopus.git + +# Clone to custom path with force overwrite +nixopus clone --path /opt/nixopus --force + +# Preview operation without executing +nixopus clone --dry-run --verbose + +# Clone with increased timeout +nixopus clone --timeout 30 +``` + +## Configuration + +The clone command reads configuration values from the built-in [`config.prod.yaml`](https://raw.githubusercontent.com/raghavyuva/nixopus/refs/heads/master/helpers/config.prod.yaml) file. Command-line options override these defaults. 
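+ 
+ As a quick illustration of that precedence (values taken from the defaults documented above):
+ 
+ ```bash
+ # Uses the built-in default branch (clone.branch: master)
+ nixopus clone
+ 
+ # The command-line flag takes precedence over the built-in default
+ nixopus clone --branch develop
+ ```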
+ +### Default Configuration Values + +| Setting | Default Value | Configuration Path | Description | +|---------|---------------|-------------------|-------------| +| Repository URL | `https://github.com/raghavyuva/nixopus` | `clone.repo` | The Git repository to clone | +| Branch | `master` | `clone.branch` | The Git branch to clone | +| Clone Path | `{nixopus-config-dir}/source` | `clone.source-path` | Local directory for cloning (relative to config dir) | +| Config Directory | `/etc/nixopus` | `nixopus-config-dir` | Base configuration directory | +| Timeout | `10` seconds | N/A | Operation timeout (hardcoded default) | + +### Configuration Source + +The configuration is loaded from the built-in [`config.prod.yaml`](https://raw.githubusercontent.com/raghavyuva/nixopus/refs/heads/master/helpers/config.prod.yaml) file packaged with the CLI. This file contains environment variable placeholders that can be overridden: + +```yaml +# Built-in configuration (from config.prod.yaml) +nixopus-config-dir: /etc/nixopus +clone: + repo: "https://github.com/raghavyuva/nixopus" + branch: "master" + source-path: source +``` + +### Overriding Configuration + +You can override defaults using command-line options only: + +```bash +# Override repository URL +nixopus clone --repo https://github.com/yourfork/nixopus.git + +# Override branch +nixopus clone --branch develop + +# Override clone path (absolute path) +nixopus clone --path /opt/nixopus + +# Override multiple options +nixopus clone --repo https://github.com/yourfork/nixopus.git --branch develop --path /custom/path +``` + +**Note**: The clone command does not support user configuration files or environment variable overrides for these settings. Configuration is handled internally through the built-in config file. + +## Behavior + +1. **Validates** repository URL and accessibility +2. **Checks** if destination path exists +3. **Removes** existing directory if `--force` is used +4. **Clones** repository using Git +5. **Reports** success or failure + +## Dry Run Mode + +Use `--dry-run` to preview what the command would do without making changes: + +```bash +nixopus clone --dry-run --repo custom-repo.git --branch develop +``` + +This shows the planned actions without executing them. + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Repository not accessible | Network issues, invalid URL, or authentication required | Check network connection, verify repository URL, or configure Git credentials | +| Destination path already exists | Directory exists at clone path | Use `--force` to overwrite or choose different `--path` | +| Invalid branch or repository URL | Branch doesn't exist or URL is malformed | Verify branch exists and URL is correct | +| Permission denied | Insufficient permissions for destination path | Use `sudo nixopus clone` or choose a path with write permissions | +| Timeout exceeded | Clone taking longer than specified timeout | Increase timeout with `--timeout` option or check network speed | + +### Permission Issues + +If you encounter permission errors when cloning to system directories: + +```bash +# Use sudo for system-wide installation +sudo nixopus clone --path /opt/nixopus + +# Or clone to user directory (recommended) +nixopus clone --path ~/nixopus +``` + +**Note**: When using `sudo`, the cloned repository will be owned by root. Consider using user directories unless system-wide installation is required. 
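+ 
+ After a successful clone you can confirm the checked-out branch and latest commit with standard Git commands (shown here against the default clone path; adjust if you used `--path`):
+ 
+ ```bash
+ git -C /etc/nixopus/source branch --show-current
+ git -C /etc/nixopus/source log -1 --oneline
+ ```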
diff --git a/docs/cli/commands/conf.md b/docs/cli/commands/conf.md new file mode 100644 index 00000000..b7de342d --- /dev/null +++ b/docs/cli/commands/conf.md @@ -0,0 +1,193 @@ +# conf - Configuration Management + +The `conf` command provides comprehensive configuration management for Nixopus services. Manage environment variables, service settings, and application configuration across API and view services with support for multiple environments. + +## Quick Start +```bash +# List current configuration +nixopus conf list --service api + +# Set configuration value +nixopus conf set DATABASE_URL=postgresql://user:pass@localhost:5432/nixopus + +# Delete configuration key +nixopus conf delete OLD_CONFIG_KEY + +# Set view service configuration +nixopus conf set --service view NODE_ENV=production +``` + +## Overview + +The conf command handles all aspects of Nixopus configuration: +- Environment variable management for services +- Multi-service configuration support (API, view) +- Environment file management (.env files) + +## Subcommands + +### `list` - Display Configuration + +Show all configuration values for specified services with optional filtering and formatting. + +```bash +nixopus conf list [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--service` | `-s` | Target service (api, view) | `api` | +| `--verbose` | `-v` | Show detailed logging and metadata | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--dry-run` | `-d` | Dry run mode | `false` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# List API service configuration +nixopus conf list --service api + +# Get JSON output +nixopus conf list --output json + +# Use custom environment file +nixopus conf list --env-file /custom/path/.env +``` + + +### `set` - Update Configuration + +Set configuration values using KEY=VALUE format with service targeting. + +```bash +nixopus conf set KEY=VALUE [OPTIONS] +``` + +**Arguments:** +- `KEY=VALUE` - Configuration pair (required, single value only) + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--service` | `-s` | Target service (api, view) | `api` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview configuration changes | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Set configuration value +nixopus conf set DATABASE_URL=postgresql://user:pass@localhost:5432/nixopus + +# Set for view service +nixopus conf set --service view NODE_ENV=production + +# Preview changes +nixopus conf set DEBUG=true --dry-run +``` + +### `delete` - Remove Configuration + +Remove configuration keys from service environments with safety checks. 
+ +```bash +nixopus conf delete KEY [OPTIONS] +``` + +**Arguments:** +- `KEY` - Configuration key to remove (required, single key only) + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--service` | `-s` | Target service (api, view) | `api` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview deletion without executing | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Delete configuration key +nixopus conf delete OLD_CONFIG_KEY + +# Preview deletion +nixopus conf delete TEMP_CONFIG --dry-run +``` + +## Configuration + +The conf command manages environment variables stored in service-specific `.env` files. Configuration is loaded from the built-in `config.prod.yaml` file to determine default environment file locations. + +### Default Environment File Locations + +| Service | Default Environment File | Configuration Path | +|---------|-------------------------|-------------------| +| API | `/etc/nixopus/source/api/.env` | `services.api.env.API_ENV_FILE` | +| View | `/etc/nixopus/source/view/.env` | `services.view.env.VIEW_ENV_FILE` | + +### Configuration Source + +Environment file paths are determined from the built-in `config.prod.yaml`: + +```yaml +# Built-in configuration +services: + api: + env: + API_ENV_FILE: ${API_ENV_FILE:-/etc/nixopus/source/api/.env} + view: + env: + VIEW_ENV_FILE: ${VIEW_ENV_FILE:-/etc/nixopus/source/view/.env} +``` + +### Overriding Environment Files + +You can specify custom environment files using the `--env-file` option: + +```bash +# Use custom environment file +nixopus conf list --env-file /custom/path/.env + +# Set configuration in custom file +nixopus conf set DATABASE_URL=custom --env-file /custom/path/.env + +# Delete from custom file +nixopus conf delete OLD_KEY --env-file /custom/path/.env +``` + +### Permission Requirements + +Environment files require appropriate read/write permissions: + +```bash +# Check current permissions +ls -la /etc/nixopus/source/api/.env + +# Fix permissions if needed (may require sudo) +sudo chmod 644 /etc/nixopus/source/api/.env +sudo chown $(whoami) /etc/nixopus/source/api/.env + +# Or use sudo for operations on system files +sudo nixopus conf set DATABASE_URL=value --service api +``` + + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| File not found | Environment file doesn't exist | Create the file or use `--env-file` with existing file | +| Permission denied | Insufficient file permissions | Use `sudo` or fix file permissions with `chmod` | +| Invalid KEY=VALUE format | Missing equals sign in set command | Ensure format is `KEY=VALUE` | +| Service not found | Invalid service name | Use `api` or `view` for `--service` option | +| Operation timeout | Command taking too long | Increase `--timeout` value | + +**Note**: When using `sudo`, ensure the environment files remain accessible to the services that need them. diff --git a/docs/cli/commands/install.md b/docs/cli/commands/install.md new file mode 100644 index 00000000..a58d388f --- /dev/null +++ b/docs/cli/commands/install.md @@ -0,0 +1,173 @@ +# install - Nixopus Installation + +The `install` command installs Nixopus with all required components and configuration. 
Provides comprehensive setup including dependencies and SSH key generation. + +## Quick Start +```bash +# Basic installation +nixopus install + +# Install with custom domains +nixopus install --api-domain api.example.com --view-domain app.example.com + +# Preview installation changes +nixopus install --dry-run --verbose +``` + +## Overview + +The install command provides a comprehensive setup process including system validation, dependency installation, and service configuration. + +## Command Syntax + +```bash +nixopus install [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--verbose` | `-v` | Show detailed installation progress | `false` | +| `--timeout` | `-t` | Installation timeout in seconds | `300` | +| `--force` | `-f` | Replace existing files without prompting | `false` | +| `--dry-run` | `-d` | Preview installation without making changes | `false` | +| `--config-file` | `-c` | Path to custom configuration file | None | +| `--api-domain` | `-ad` | Domain for API access | None | +| `--view-domain` | `-vd` | Domain for web interface | None | + +**Examples:** + +```bash +# Standard installation +nixopus install + +# Production installation with custom domains +nixopus install --api-domain api.production.com --view-domain app.production.com --timeout 600 + +# Preview installation with verbose output +nixopus install --dry-run --verbose + +# Force installation (overwrite existing files) +nixopus install --force +``` + +## Subcommands + +### `ssh` - SSH Key Generation + +Generate SSH key pairs with proper permissions and optional authorized_keys integration. + +```bash +nixopus install ssh [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--path` | `-p` | SSH key file path | `~/.ssh/nixopus_ed25519` | +| `--key-type` | `-t` | Key type (rsa, ed25519, ecdsa) | `ed25519` | +| `--key-size` | `-s` | Key size in bits | `4096` | +| `--passphrase` | `-P` | Passphrase for key encryption | None | +| `--force` | `-f` | Overwrite existing SSH keys | `false` | +| `--set-permissions` | `-S` | Set proper file permissions | `true` | +| `--add-to-authorized-keys` | `-a` | Add public key to authorized_keys | `false` | +| `--create-ssh-directory` | `-c` | Create .ssh directory if needed | `true` | +| `--verbose` | `-v` | Show detailed output | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview operation | `false` | +| `--timeout` | `-T` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Generate default Ed25519 key +nixopus install ssh + +# Generate RSA key with custom path and size +nixopus install ssh --path ~/.ssh/nixopus_rsa --key-type rsa --key-size 4096 + +# Generate encrypted key for production +nixopus install ssh --passphrase "secure-passphrase" --add-to-authorized-keys +``` + +### `deps` - Dependency Installation + +Install and configure system dependencies required for Nixopus operation. 
+ +```bash +nixopus install deps [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--verbose` | `-v` | Show detailed installation progress | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview dependency installation | `false` | +| `--timeout` | `-t` | Installation timeout in seconds | `10` | + +**Examples:** + +```bash +# Install all required dependencies +nixopus install deps + +# Preview dependency installation +nixopus install deps --dry-run --verbose + +# Get JSON output for automation +nixopus install deps --output json +``` + +## Configuration + +The install command reads configuration values from the built-in `config.prod.yaml` file and accepts command-line overrides. + +### Default Configuration Values + +| Setting | Default Value | Description | +|---------|---------------|-------------| +| Timeout | `300` seconds | Maximum time to wait for installation steps | +| SSH Key Path | `~/.ssh/nixopus_ed25519` | Default SSH key location | +| SSH Key Type | `ed25519` | Default SSH key algorithm | +| SSH Key Size | `4096` bits | Default key size for RSA keys | + +### Configuration Source + +Configuration is loaded from the built-in `config.prod.yaml` and command-line options. + +### Overriding Configuration + +You can override defaults using command-line options: + +```bash +# Use custom domains +nixopus install --api-domain api.example.com --view-domain app.example.com + +# Use custom config file +nixopus install --config-file /path/to/config.yaml + +# Custom timeout and force mode +nixopus install --timeout 600 --force +``` + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Permission denied | Insufficient file system permissions | Use `sudo nixopus install` | +| Docker not available | Docker daemon not running | Start Docker service | +| Port conflicts | Ports already in use | Stop conflicting services | +| SSH key generation fails | SSH directory permissions | Fix SSH directory permissions | +| Installation timeout | Network or system issues | Increase timeout with `--timeout` option | + +If permission issues occur, use sudo: +```bash +sudo nixopus install --verbose +``` + +## Related Commands + +- **[preflight](./preflight.md)** - Run system checks before installation +- **[service](./service.md)** - Manage installed services +- **[conf](./conf.md)** - Configure installed services +- **[uninstall](./uninstall.md)** - Remove Nixopus installation \ No newline at end of file diff --git a/docs/cli/commands/preflight.md b/docs/cli/commands/preflight.md new file mode 100644 index 00000000..452fbddc --- /dev/null +++ b/docs/cli/commands/preflight.md @@ -0,0 +1,215 @@ +# preflight - System Readiness Checks + +The `preflight` command runs comprehensive system checks to ensure your environment is properly configured for Nixopus installation and operation. + +## Quick Start +```bash +# Run full system check +nixopus preflight check + +# Check specific ports +nixopus preflight ports 80 443 8080 + +# Verify dependencies +nixopus preflight deps docker git +``` + +## Overview + +The preflight command performs system readiness checks including port availability and dependency verification. + +## Subcommands + +### `check` - Comprehensive System Check + +Runs port availability checks based on configuration. This is the default command when running `preflight` without subcommands. 
+ +```bash +nixopus preflight check [OPTIONS] +nixopus preflight [OPTIONS] # same as check +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--verbose` | `-v` | Show detailed logging information | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Basic system check +nixopus preflight check + +# Detailed check with verbose output +nixopus preflight check --verbose +``` + +**What it does:** +- Reads required ports from the configuration file +- Checks if those ports are available on localhost +- Reports success if all configured ports are free + +### `ports` - Port Availability Check + +Verify specific ports are available for Nixopus services. + +```bash +nixopus preflight ports [PORT...] [OPTIONS] +``` + +**Arguments:** +- `PORT...` - List of ports to check (required) + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--host` | `-h` | Host to check | `localhost` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Check standard web ports +nixopus preflight ports 80 443 + +# Check ports on remote host +nixopus preflight ports 80 443 --host production.server.com + +# Get JSON output +nixopus preflight ports 80 443 8080 --output json +``` + +**Output:** +The command outputs a formatted table or JSON showing port availability status for each port checked. + +### `deps` - Dependency Verification + +Check if required system dependencies are installed and accessible. + +```bash +nixopus preflight deps [DEPENDENCY...] [OPTIONS] +``` + +**Arguments:** +- `DEPENDENCY...` - List of dependencies to check (required) + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Common Dependencies:** +- `docker` - Docker container runtime +- `docker-compose` - Docker Compose orchestration +- `git` - Git version control +- `curl` - HTTP client utility +- `ssh` - SSH client + +**Examples:** + +```bash +# Check core dependencies +nixopus preflight deps docker git + +# Check with verbose output +nixopus preflight deps docker git --verbose + +# Get JSON output +nixopus preflight deps docker git --output json +``` + +**Output:** +The command outputs a formatted table showing dependency availability. Uses `shutil.which()` to check if commands are available in the system PATH. + +## Configuration + +The preflight command reads configuration values from the built-in `config.prod.yaml` file to determine which ports and dependencies to check. 
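Conceptually the checks are simple: a dependency is considered present when its command resolves on `PATH` (the CLI uses Python's `shutil.which()` for this, as noted above), and a port is considered available when nothing is listening on it. You can approximate both checks from a shell for troubleshooting; the snippet below is only an illustration (it assumes `nc` from netcat is installed) and is not what the CLI runs internally:

```bash
# Dependency check: the shell equivalent of shutil.which()
command -v docker >/dev/null && echo "docker: found" || echo "docker: missing"

# Port check: nc exits 0 when something is already listening on the port
for port in 2019 80 443 7443 8443 6379 5432; do
  nc -z localhost "$port" && echo "port $port: in use" || echo "port $port: free"
done
```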
+ +### Default Configuration Values + +| Setting | Default Value | Configuration Path | Description | +|---------|---------------|-------------------|-------------| +| Ports | `[2019, 80, 443, 7443, 8443, 6379, 5432]` | `ports` | Ports checked by default | +| Timeout | `10` seconds | N/A | Operation timeout (hardcoded default) | + +### Configuration Source + +Configuration is loaded from the built-in `config.prod.yaml`: + +```yaml +# Built-in configuration +ports: [2019, 80, 443, 7443, 8443, 6379, 5432] + +deps: + curl: { package: "curl", command: "curl" } + python3: { package: "python3", command: "python3" } + python3-venv: { package: "python3-venv", command: "" } + git: { package: "git", command: "git" } + docker.io: { package: "docker.io", command: "docker" } + openssl: { package: "openssl", command: "openssl" } + openssh-client: { package: "openssh-client", command: "ssh" } + openssh-server: { package: "openssh-server", command: "sshd" } +``` + +### Port Descriptions + +| Port | Service | Purpose | +|------|---------|---------| +| `2019` | Caddy | Admin API port | +| `80` | HTTP | Web traffic | +| `443` | HTTPS | Secure web traffic | +| `7443` | View | Frontend service | +| `8443` | API | Backend service | +| `6379` | Redis | Cache/session store | +| `5432` | PostgreSQL | Database | + +### Available Dependencies + +| Command | Package | Purpose | +|---------|---------|---------| +| `curl` | curl | HTTP client utility | +| `python3` | python3 | Python runtime | +| `git` | git | Version control | +| `docker` | docker.io | Container runtime | +| `openssl` | openssl | SSL/TLS toolkit | +| `ssh` | openssh-client | SSH client | +| `sshd` | openssh-server | SSH server | + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Port already in use | Service running on checked port | Stop conflicting service or use different ports | +| Command not found | Dependency not installed | Install required package using system package manager | +| Permission denied | Docker requires elevated privileges | Add user to docker group or use sudo | +| Connection timeout | Network issues or slow system | Increase timeout with `--timeout` option | +| Invalid port number | Port outside valid range | Use port numbers between 1-65535 | + +### Permission Issues + +If you encounter permission errors, especially with Docker: + +```bash +# Check if docker requires sudo +docker ps +# If this fails with permission denied: + +# Add user to docker group +sudo usermod -aG docker $USER + +# Restart shell session +newgrp docker + +# Test without sudo +docker ps + +# Or use sudo for preflight checks +sudo nixopus preflight deps docker +``` + +**Note**: When using `sudo`, ensure the command can access the same configuration files. diff --git a/docs/cli/commands/proxy.md b/docs/cli/commands/proxy.md new file mode 100644 index 00000000..c8e178d8 --- /dev/null +++ b/docs/cli/commands/proxy.md @@ -0,0 +1,172 @@ +# proxy - Caddy Proxy Management + +The `proxy` command controls the Caddy reverse proxy server that handles HTTP routing, SSL termination, and load balancing for Nixopus services. Manage proxy configuration, monitor status, and control the proxy lifecycle. 
+ +## Quick Start +```bash +# Load proxy configuration +nixopus proxy load + +# Check proxy status +nixopus proxy status + +# Stop proxy server +nixopus proxy stop +``` + +## Overview + +The proxy command manages Caddy as the reverse proxy for Nixopus: +- HTTP/HTTPS routing to API and view services +- Configuration loading and management +- Proxy status monitoring +- Graceful proxy shutdown + +## Subcommands + +### `load` - Load Proxy Configuration + +Load and apply Caddy proxy configuration from file with validation support. + +```bash +nixopus proxy load [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--proxy-port` | `-p` | Caddy admin API port | `2019` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | | Validate configuration without applying | `false` | +| `--config-file` | `-c` | Path to Caddy configuration file | None | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Load default proxy configuration +nixopus proxy load + +# Load custom configuration file +nixopus proxy load --config-file /path/to/caddy.json + +# Validate configuration without applying +nixopus proxy load --config-file caddy.json --dry-run + +# Load with custom admin port +nixopus proxy load --proxy-port 2019 --verbose +``` + +### `status` - Check Proxy Status + +Display status information about the Caddy proxy server. + +```bash +nixopus proxy status [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--proxy-port` | `-p` | Caddy admin API port | `2019` | +| `--verbose` | `-v` | Show detailed status information | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | | Preview operation without executing | `false` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Basic proxy status +nixopus proxy status + +# Detailed status information +nixopus proxy status --verbose + +# JSON output for monitoring +nixopus proxy status --output json + +# Check with custom admin port +nixopus proxy status --proxy-port 2019 +``` + +### `stop` - Stop Proxy Server + +Gracefully stop the Caddy proxy server. + +```bash +nixopus proxy stop [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--proxy-port` | `-p` | Caddy admin API port | `2019` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | | Preview stop operation | `false` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Graceful proxy shutdown +nixopus proxy stop + +# Stop with detailed logging +nixopus proxy stop --verbose + +# Preview stop operation +nixopus proxy stop --dry-run + +# Stop with custom admin port +nixopus proxy stop --proxy-port 2019 +``` + +## Configuration + +The proxy command reads configuration values from the built-in `config.prod.yaml` file to determine the default Caddy admin port. 
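To verify the admin API independently of the CLI, Caddy's standard admin endpoints can be queried directly with `curl`. The commands below assume the default admin port `2019` and are conceptually similar to what the `status` and `load` subcommands do; they are shown for troubleshooting, not as a replacement for the CLI:

```bash
# Fetch the currently loaded configuration from Caddy's admin API
curl -s http://localhost:2019/config/

# Push a JSON configuration file (similar in spirit to `nixopus proxy load`)
curl -s -X POST http://localhost:2019/load \
  -H "Content-Type: application/json" \
  --data-binary @/path/to/caddy.json
```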
+ +### Default Configuration Values + +| Setting | Default Value | Configuration Path | Description | +|---------|---------------|-------------------|-------------| +| Proxy Port | `2019` | `services.caddy.env.PROXY_PORT` | Caddy admin API port | +| Timeout | `10` seconds | N/A | Operation timeout (hardcoded default) | + +### Configuration Source + +Configuration is loaded from the built-in `config.prod.yaml`: + +```yaml +# Built-in configuration +services: + caddy: + env: + PROXY_PORT: ${PROXY_PORT:-2019} +``` + +### Overriding Configuration + +You can override defaults using command-line options: + +```bash +# Use custom admin port +nixopus proxy status --proxy-port 8080 + +# Use custom config file +nixopus proxy load --config-file /custom/caddy.json + +# Combine both +nixopus proxy load --proxy-port 8080 --config-file /custom/caddy.json +``` + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Connection refused | Caddy admin API not running | Start Caddy or check admin port | +| Configuration file not found | Invalid config file path | Check file path and permissions | +| Invalid configuration | Malformed Caddy config | Validate JSON/config syntax | +| Permission denied | Insufficient network permissions | Use sudo or check port availability | +| Operation timeout | Network or server issues | Increase timeout with `--timeout` option | diff --git a/docs/cli/commands/service.md b/docs/cli/commands/service.md new file mode 100644 index 00000000..3eec7bf3 --- /dev/null +++ b/docs/cli/commands/service.md @@ -0,0 +1,205 @@ +# service - Docker Compose Service Management + +The `service` command provides comprehensive control over Nixopus services using Docker Compose integration. Manage the lifecycle of all Nixopus components including API, web interface, database, and proxy services. + +## Quick Start +```bash +# Start all services +nixopus service up --detach + +# Check service status +nixopus service ps + +# Restart specific service +nixopus service restart --name api + +# Stop all services +nixopus service down +``` + +## Overview + +The service command acts as a Docker Compose wrapper with Nixopus-specific enhancements: +- Service lifecycle management (start, stop, restart, status) +- Environment-specific configuration loading +- Custom Docker Compose file support +- Service-specific targeting + +## Subcommands + +### `up` - Start Services + +Start Nixopus services with dependency orchestration. + +```bash +nixopus service up [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--name` | `-n` | Specific service name | `all` | +| `--detach` | `-d` | Run services in background | `false` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | | Preview operation without executing | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--compose-file` | `-f` | Custom Docker Compose file path | `/etc/nixopus/source/docker-compose.yml` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Start all services in foreground +nixopus service up + +# Start all services in background +nixopus service up --detach + +# Start specific service +nixopus service up --name api + +# Preview operation +nixopus service up --dry-run +``` + +### `down` - Stop Services + +Stop Nixopus services with graceful shutdown. 
+ +```bash +nixopus service down [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--name` | `-n` | Specific service name | `all` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | | Preview operation without executing | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--compose-file` | `-f` | Custom Docker Compose file path | `/etc/nixopus/source/docker-compose.yml` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Stop all services +nixopus service down + +# Stop specific service +nixopus service down --name api + +# Preview operation +nixopus service down --dry-run +``` + +### `ps` - Show Service Status + +Display status information for Nixopus services. + +```bash +nixopus service ps [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--name` | `-n` | Filter by specific service name | `all` | +| `--verbose` | `-v` | Show detailed service information | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview operation without executing | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--compose-file` | `-f` | Custom Docker Compose file path | `/etc/nixopus/source/docker-compose.yml` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Show all services +nixopus service ps + +# Show specific service +nixopus service ps --name api + +# Get JSON output +nixopus service ps --output json +``` + +### `restart` - Restart Services + +Restart services with configurable restart strategies. + +```bash +nixopus service restart [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--name` | `-n` | Specific service name | `all` | +| `--verbose` | `-v` | Show detailed logging | `false` | +| `--output` | `-o` | Output format (text, json) | `text` | +| `--dry-run` | `-d` | Preview operation without executing | `false` | +| `--env-file` | `-e` | Custom environment file path | None | +| `--compose-file` | `-f` | Custom Docker Compose file path | `/etc/nixopus/source/docker-compose.yml` | +| `--timeout` | `-t` | Operation timeout in seconds | `10` | + +**Examples:** + +```bash +# Restart all services +nixopus service restart + +# Restart specific service +nixopus service restart --name api + +# Preview operation +nixopus service restart --dry-run +``` + +## Configuration + +The service command reads configuration values from the built-in `config.prod.yaml` file to determine default compose file location. 
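Because the command wraps Docker Compose, you can also invoke Docker Compose directly against the same files when debugging. The examples below assume the default compose file path listed in the table that follows and a Compose service named `api`, matching the `--name` examples above; they are plain Docker Compose usage, not features of the CLI:

```bash
# Roughly equivalent to `nixopus service ps` with the default compose file
docker compose -f /etc/nixopus/source/docker-compose.yml ps

# Follow logs for a single service (not covered by the subcommands above)
docker compose -f /etc/nixopus/source/docker-compose.yml logs -f api
```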
+ +### Default Configuration Values + +| Setting | Default Value | Configuration Path | Description | +|---------|---------------|-------------------|-------------| +| Compose File | `source/docker-compose.yml` | `compose-file-path` | Docker Compose file path (relative to config dir) | +| Config Directory | `/etc/nixopus` | `nixopus-config-dir` | Base configuration directory | +| Timeout | `10` seconds | N/A | Operation timeout (hardcoded default) | + +### Configuration Source + +Configuration is loaded from the built-in `config.prod.yaml`: + +```yaml +# Built-in configuration +nixopus-config-dir: /etc/nixopus +compose-file-path: source/docker-compose.yml +``` + +### Overriding Configuration + +You can override defaults using command-line options: + +```bash +# Use custom compose file +nixopus service up --compose-file /custom/docker-compose.yml + +# Use custom environment file +nixopus service up --env-file /custom/.env + +# Combine both +nixopus service up --compose-file /custom/compose.yml --env-file /custom/.env +``` + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Compose file not found | Docker Compose file missing | Check file path or use `--compose-file` option | +| Docker daemon not running | Docker service stopped | Start Docker service: `sudo systemctl start docker` | +| Port already in use | Service running on required port | Stop conflicting service or change port configuration | +| Permission denied | Insufficient Docker permissions | Add user to docker group or use `sudo` | +| Service startup timeout | Service taking too long to start | Increase timeout with `--timeout` option | diff --git a/docs/cli/commands/test.md b/docs/cli/commands/test.md new file mode 100644 index 00000000..0a582618 --- /dev/null +++ b/docs/cli/commands/test.md @@ -0,0 +1,93 @@ +# test - CLI Testing Utilities + +The `test` command runs tests for the Nixopus CLI in development environments. This command is restricted to development environments only. + +## Quick Start +```bash +# Set development environment (required) +export ENV=DEVELOPMENT + +# Run all tests +nixopus test + +# Run specific test target +nixopus test version +``` + +## Overview + +The test command provides basic testing capabilities for the Nixopus CLI. It requires the `ENV=DEVELOPMENT` environment variable to prevent accidental execution in production. + +## Command Syntax + +```bash +nixopus test [TARGET] +``` + +| Argument | Description | Required | +|----------|-------------|----------| +| `TARGET` | Specific test target (e.g., version) | No | + +**Examples:** + +```bash +# Set development environment first (required) +export ENV=DEVELOPMENT + +# Run all tests +nixopus test + +# Run specific command tests +nixopus test version +``` + +## Configuration + +The test command does not use external configuration files. It operates with environment variable requirements. 
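Because the only requirement is the environment variable, you can also set it for a single invocation instead of exporting it; this is ordinary shell behavior rather than a CLI feature:

```bash
# Run a single test target with ENV set only for this command
ENV=DEVELOPMENT nixopus test version
```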
+ +### Environment Requirements + +| Setting | Required Value | Description | +|---------|---------------|-------------| +| ENV | `DEVELOPMENT` | Must be set to enable testing | + +### Configuration Source + +The test command requires the `ENV=DEVELOPMENT` environment variable to be set: + +```bash +# Required environment setup +export ENV=DEVELOPMENT +``` + +### Overriding Configuration + +You can specify different test targets using the command argument: + +```bash +# Test specific command +nixopus test version + +# Test different command +nixopus test install +``` + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Environment not development | ENV not set to DEVELOPMENT | Set `export ENV=DEVELOPMENT` | +| Test dependencies missing | Development packages not installed | Install with `poetry install --with dev` | +| Permission denied | File system permissions | Use `sudo` if necessary | + +If permission issues occur, use sudo: +```bash +sudo nixopus test +``` + +## Related Commands + +- **[version](./version.md)** - Check CLI version before running tests +- **[preflight](./preflight.md)** - Validate test environment setup \ No newline at end of file diff --git a/docs/cli/commands/uninstall.md b/docs/cli/commands/uninstall.md new file mode 100644 index 00000000..1aafeb70 --- /dev/null +++ b/docs/cli/commands/uninstall.md @@ -0,0 +1,100 @@ +# uninstall - Complete Nixopus Removal + +The `uninstall` command completely removes Nixopus from your system. This is a destructive operation that permanently removes all Nixopus components. + +## Quick Start +```bash +# Standard uninstallation +nixopus uninstall + +# Preview what will be removed +nixopus uninstall --dry-run --verbose + +# Force uninstallation without prompts +nixopus uninstall --force +``` + +## Overview + +The uninstall command completely removes Nixopus from your system including services, configuration files, and data. + +## Command Syntax + +```bash +nixopus uninstall [OPTIONS] +``` + +| Option | Short | Description | Default | +|--------|-------|-------------|---------| +| `--verbose` | `-v` | Show detailed uninstallation progress | `false` | +| `--timeout` | `-t` | Operation timeout in seconds | `300` | +| `--dry-run` | `-d` | Preview what would be removed without executing | `false` | +| `--force` | `-f` | Skip confirmation prompts and force removal | `false` | + +**Examples:** + +```bash +# Interactive uninstallation +nixopus uninstall + +# Preview uninstallation +nixopus uninstall --dry-run --verbose + +# Force uninstallation without prompts +nixopus uninstall --force + +# Custom timeout +nixopus uninstall --timeout 600 --verbose +``` + +## Configuration + +The uninstall command does not use external configuration files. It operates with hardcoded default values. 
+ +### Default Configuration Values + +| Setting | Default Value | Description | +|---------|---------------|-------------| +| Timeout | `300` seconds | Maximum time to wait for each uninstallation step | +| Verbose | `false` | Show detailed logging during uninstallation | +| Dry Run | `false` | Preview mode without making actual changes | +| Force | `false` | Skip confirmation prompts | + +### Overriding Configuration + +You can override defaults using command-line options: + +```bash +# Use custom timeout +nixopus uninstall --timeout 600 + +# Enable verbose logging +nixopus uninstall --verbose + +# Preview without changes +nixopus uninstall --dry-run + +# Force uninstall without prompts +nixopus uninstall --force +``` + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Permission denied | Insufficient file system permissions | Use `sudo nixopus uninstall` | +| Services still running | Docker containers won't stop | Force stop with `docker stop` command | +| Files in use | Configuration files locked | Close applications using Nixopus files | +| Timeout exceeded | Uninstall taking too long | Increase timeout with `--timeout` option | + +If permission issues occur, use sudo: +```bash +sudo nixopus uninstall --verbose +``` + +## Related Commands + +- **[service](./service.md)** - Stop services before uninstalling +- **[conf](./conf.md)** - Backup configuration before removal \ No newline at end of file diff --git a/docs/cli/commands/version.md b/docs/cli/commands/version.md new file mode 100644 index 00000000..6ea434cc --- /dev/null +++ b/docs/cli/commands/version.md @@ -0,0 +1,59 @@ +# version - CLI Version Information + +The `version` command displays the current version of the Nixopus CLI. Essential for troubleshooting and support requests. + +## Quick Start +```bash +# Display version information +nixopus version + +# Short version flag +nixopus --version +``` + +## Overview + +The version command provides basic version information about the Nixopus CLI installation using the package metadata. + +## Command Syntax + +```bash +nixopus version +``` + +**Alternative Forms:** +```bash +nixopus --version +nixopus -v +``` + +**Examples:** + +```bash +# Display version information +nixopus version + +# Alternative syntax +nixopus --version +nixopus -v +``` + +## Configuration + +The version command does not use external configuration. It reads version information directly from the installed package metadata using Python's `importlib.metadata.version()`. + +### Version Source + +The version is determined from: +- **Package metadata** - Installed package version from `importlib.metadata` +- **Display formatting** - Rich console formatting for output + +## Error Handling + +Common error scenarios and solutions: + +| Error | Cause | Solution | +|-------|-------|----------| +| Package not found | CLI not properly installed | Reinstall the CLI (e.g. `pip install -e .` from the `cli/` directory, or rerun the install script) | +| Import error | Python environment issues | Check Python installation and PATH | +| Permission denied | File system permissions | Check package installation permissions | diff --git a/docs/cli/config.md b/docs/cli/config.md new file mode 100644 index 00000000..21f709c3 --- /dev/null +++ b/docs/cli/config.md @@ -0,0 +1,114 @@ +# CLI Configuration + +The Nixopus CLI uses a built-in YAML configuration file that defines default values for all commands.
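The `${VAR:-default}` placeholders used throughout that file follow the usual shell convention: use the environment variable when it is set, otherwise fall back to the built-in default. The snippet below only demonstrates that rule in plain `bash`; the CLI performs its own expansion when it loads the file:

```bash
unset API_PORT
echo "${API_PORT:-8443}"   # prints 8443 (built-in default)

export API_PORT=9443
echo "${API_PORT:-8443}"   # prints 9443 (environment override wins)
```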
+ +## Configuration File + +The CLI reads configuration from: +``` +helpers/config.prod.yaml +``` + +This file is bundled with the CLI and contains production-ready defaults that can be overridden through environment variables. + +## Key Configuration Sections + +### Service Defaults +```yaml +services: + api: + env: + PORT: ${API_PORT:-8443} + DB_NAME: ${DB_NAME:-postgres} + USERNAME: ${USERNAME:-postgres} + PASSWORD: ${PASSWORD:-changeme} + # ... other API settings + + view: + env: + PORT: ${VIEW_PORT:-7443} + NEXT_PUBLIC_PORT: ${NEXT_PUBLIC_PORT:-7443} + # ... other view settings + + caddy: + env: + PROXY_PORT: ${PROXY_PORT:-2019} + API_DOMAIN: ${API_DOMAIN:-} + VIEW_DOMAIN: ${VIEW_DOMAIN:-} + # ... other proxy settings +``` + +### System Dependencies +```yaml +deps: + curl: { package: "curl", command: "curl" } + python3: { package: "python3", command: "python3" } + git: { package: "git", command: "git" } + docker.io: { package: "docker.io", command: "docker" } + openssl: { package: "openssl", command: "openssl" } + openssh-client: { package: "openssh-client", command: "ssh" } +``` + +### Network Ports +```yaml +ports: [2019, 80, 443, 7443, 8443, 6379, 5432] +``` + +### Repository Settings +```yaml +clone: + repo: "https://github.com/raghavyuva/nixopus" + branch: "master" + source-path: source +``` + +### SSH Configuration +```yaml +ssh_key_size: 4096 +ssh_key_type: ed25519 +ssh_file_path: ssh/id_rsa +``` + +### File Paths +```yaml +nixopus-config-dir: /etc/nixopus +compose-file-path: source/docker-compose.yml +``` + +## Environment Variable Overrides + +All configuration values use environment variable expansion: +```yaml +PORT: ${API_PORT:-8443} # Uses API_PORT if set, otherwise 8443 +``` + +**Common overrides:** +```bash +# Override API domain +export API_DOMAIN=api.example.com + +# Override database credentials +export USERNAME=myuser +export PASSWORD=mypassword + +# Override ports +export API_PORT=9443 +export VIEW_PORT=8443 +``` + +## Command Usage + +Commands read specific configuration sections: + +| Command | Configuration Used | +|---------|-------------------| +| **preflight** | `ports`, `deps` | +| **install** | Service defaults, paths, SSH settings | +| **service** | Service environment variables | +| **conf** | Service environment configurations | +| **proxy** | `services.caddy.env` settings | +| **clone** | `clone` repository settings | + +## Configuration Access + +Commands access configuration through the CLI's config system - users don't need to manage the configuration file directly. Use command-line options and environment variables to customize behavior. \ No newline at end of file diff --git a/docs/cli/development.md b/docs/cli/development.md new file mode 100644 index 00000000..e61a2a31 --- /dev/null +++ b/docs/cli/development.md @@ -0,0 +1,107 @@ +# CLI Development + +Basic development guide for contributing to the Nixopus CLI. 
+ +## Project Structure + +``` +cli/ +├── app/ # Main application package +│ ├── main.py # CLI entry point +│ ├── commands/ # Command implementations +│ └── utils/ # Shared utilities +├── pyproject.toml # Project configuration (Poetry) +├── poetry.lock # Dependency lock file +└── Makefile # Development commands +``` + +## Development Setup + +### Prerequisites +- Python 3.9+ +- Poetry (for dependency management) +- Git + +### Installation + +```bash +# Clone and navigate +git clone https://github.com/raghavyuva/nixopus.git +cd nixopus/cli + +# Install dependencies +poetry install --with dev + +# Activate virtual environment +poetry shell + +# Install CLI in development mode +pip install -e . + +# Verify installation +nixopus --help +``` + +## Testing + +```bash +# Set development environment (required for tests) +export ENV=DEVELOPMENT + +# Run tests +make test + +# Run with coverage +make test-cov + +# Run specific test +poetry run pytest tests/test_commands_version.py +``` + +## Available Make Commands + +```bash +make help # Show available commands +make install # Install dependencies +make test # Run test suite +make test-cov # Run tests with coverage +make lint # Run code linting +make format # Format code +make clean # Clean build artifacts +make build # Build distribution +``` + +## Contributing + +1. **Create a branch** + ```bash + git checkout -b feature/your-feature + ``` + +2. **Make changes and test** + ```bash + export ENV=DEVELOPMENT + make test + ``` + +3. **Commit and submit pull request** + ```bash + git add . + git commit -m "Description of changes" + git push origin feature/your-feature + ``` + +## Dependencies + +### Core Dependencies +- **typer**: CLI framework +- **rich**: Terminal formatting +- **pydantic**: Data validation +- **requests**: HTTP library +- **pyyaml**: YAML parsing + +### Development Dependencies +- **pytest**: Testing framework +- **pytest-cov**: Coverage reporting +- **black**: Code formatting +- **flake8**: Code linting \ No newline at end of file diff --git a/docs/cli/index.md b/docs/cli/index.md new file mode 100644 index 00000000..d827e473 --- /dev/null +++ b/docs/cli/index.md @@ -0,0 +1,115 @@ +# Nixopus CLI + +Command line interface for managing Nixopus applications and services. Built with Python and Typer, providing an intuitive terminal experience for deployment and management. 
+ +## Quick Start + +```bash +# Install CLI +curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/master/cli/install.sh | bash + +# Verify installation +nixopus --help + +# Check system requirements +nixopus preflight check + +# Install Nixopus +nixopus install +``` + +## Available Commands + +| Command | Description | Key Subcommands | +|---------|-------------|-----------------| +| **[preflight](./commands/preflight.md)** | System readiness checks | check, ports, deps | +| **[install](./commands/install.md)** | Complete Nixopus installation | ssh, deps | +| **[uninstall](./commands/uninstall.md)** | Remove Nixopus from system | - | +| **[service](./commands/service.md)** | Control Docker services | up, down, ps, restart | +| **[conf](./commands/conf.md)** | Manage application settings | list, set, delete | +| **[proxy](./commands/proxy.md)** | Caddy proxy management | load, status, stop | +| **[clone](./commands/clone.md)** | Repository cloning with Git | - | +| **[version](./commands/version.md)** | Display CLI version information | - | +| **[test](./commands/test.md)** | Run CLI tests (development only) | - | + +## Common Workflows + +### Initial Setup +```bash +# 1. Check system requirements +nixopus preflight check + +# 2. Install with custom domains +nixopus install --api-domain api.example.com --view-domain app.example.com + +# 3. Start services +nixopus service up --detach + +# 4. Load proxy configuration +nixopus proxy load + +# 5. Verify everything is running +nixopus service ps +``` + +### Configuration Management +```bash +# View current configuration +nixopus conf list --service api + +# Update settings +nixopus conf set DATABASE_URL=postgresql://user:pass@localhost:5432/nixopus + +# Restart services to apply changes +nixopus service restart +``` + +### Development Setup +```bash +# Clone repository +nixopus clone --branch develop + +# Preview installation +nixopus install --dry-run + +# Start development environment +nixopus service up --env-file .env.development + +# Run tests +export ENV=DEVELOPMENT +nixopus test +``` + +## Global Options + +Most commands support these options: + +| Option | Shorthand | Description | +|--------|-----------|-------------| +| `--verbose` | `-v` | Show detailed output | +| `--output` | `-o` | Output format (text, json) | +| `--dry-run` | `-d` | Preview without executing | +| `--timeout` | `-t` | Operation timeout in seconds | +| `--help` | | Show command help | + +## Getting Help + +```bash +# General help +nixopus --help + +# Command-specific help +nixopus install --help +nixopus service --help + +# Subcommand help +nixopus service up --help +``` + +## Installation + +See the [Installation Guide](./installation.md) for detailed setup instructions including binary installation, Poetry setup, and development environment configuration. + +## Development + +See the [Development Guide](./development.md) for information on contributing to the CLI, project structure, and testing procedures. \ No newline at end of file diff --git a/docs/cli/installation.md b/docs/cli/installation.md new file mode 100644 index 00000000..78a03658 --- /dev/null +++ b/docs/cli/installation.md @@ -0,0 +1,213 @@ +# CLI Installation + +Installation guide for the Nixopus CLI with multiple installation options. 
+ +## Prerequisites + +- **Python 3.9 or higher** (supports up to Python 3.13) +- **Git** (for source installation) + +Verify your Python version: +```bash +python3 --version +``` + +## Installation Options + +### Option 1: Binary Installation (Recommended) + +Download and install the pre-built binary for your platform: + +```bash +# Download and run the install script +curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/master/cli/install.sh | bash + +# Or for local installation (no sudo required) +curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/master/cli/install.sh | bash -s -- --local +``` + +**Install script options:** +- `--local`: Install to `~/.local/bin` (no sudo required) +- `--dir DIR`: Install to custom directory +- `--no-path`: Don't update PATH automatically + +**Manual binary installation:** +```bash +# Download the appropriate binary for your platform +wget https://github.com/raghavyuva/nixopus/releases/latest/download/nixopus_$(uname -s | tr '[:upper:]' '[:lower:]')_$(uname -m) + +# Make executable and install +chmod +x nixopus_* +sudo mv nixopus_* /usr/local/bin/nixopus + +# Or install locally without sudo +mkdir -p ~/.local/bin +mv nixopus_* ~/.local/bin/nixopus +``` + +### Option 2: Poetry Installation (For Development) + +Using Poetry for development work: + +```bash +# Clone repository +git clone https://github.com/raghavyuva/nixopus.git +cd nixopus/cli + +# Install with Poetry +poetry install + +# Activate virtual environment +poetry shell + +# Verify installation +nixopus --help +``` + +### Option 3: Python Package Installation + +Install from source using pip: + +```bash +# Clone repository +git clone https://github.com/raghavyuva/nixopus.git +cd nixopus/cli + +# Install in development mode +pip install -e . 
+ +# Or install from wheel (if available) +pip install dist/nixopus-0.1.0-py3-none-any.whl +``` + +### Option 4: Build from Source + +Build your own binary: + +```bash +# Clone repository +git clone https://github.com/raghavyuva/nixopus.git +cd nixopus/cli + +# Install Poetry dependencies +poetry install --with dev + +# Build binary +./build.sh + +# Install the built binary +./install.sh --local +``` + +## Verification + +After installation, verify the CLI is working: + +```bash +nixopus --help +nixopus version +``` + +Expected output: +``` +┌───────────────── Version Info ─────────────────┐ +│ Nixopus CLI v0.1.0 │ +└─────────────────────────────────────────────────┘ +``` + +## Troubleshooting + +### Command Not Found + +If `nixopus` command is not found: + +```bash +# Check if binary is in PATH +which nixopus + +# For local installation, add to PATH +echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc +source ~/.bashrc + +# Or for zsh +echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc +source ~/.zshrc +``` + +### Permission Errors + +For permission issues during installation: + +```bash +# Use local installation +curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/master/cli/install.sh | bash -s -- --local + +# Or install to custom directory +curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/master/cli/install.sh | bash -s -- --dir ~/bin +``` + +### Python Version Issues + +For Python version compatibility issues: + +```bash +# Check Python version +python3 --version + +# Install specific Python version if needed (example for Ubuntu) +sudo apt update +sudo apt install python3.9 + +# Or use pyenv for version management +curl https://pyenv.run | bash +pyenv install 3.9.0 +pyenv local 3.9.0 +``` + +## Development Installation + +For contributing to the CLI: + +```bash +# Clone and setup +git clone https://github.com/raghavyuva/nixopus.git +cd nixopus/cli + +# Install with development dependencies +poetry install --with dev + +# Activate environment +poetry shell + +# Run tests to verify setup +make test +``` + +Available development commands: +```bash +make help # Show available commands +make test # Run test suite +make test-cov # Run tests with coverage +make build # Build binary +make format # Format code +make lint # Run linting +make clean # Clean build artifacts +``` + +## Uninstallation + +To uninstall the CLI: + +```bash +# For binary installation +sudo rm /usr/local/bin/nixopus +# Or for local installation +rm ~/.local/bin/nixopus + +# For Poetry installation +cd nixopus/cli +poetry env remove python + +# For pip installation +pip uninstall nixopus +``` \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index cb61f1ae..bf873452 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,14 +5,13 @@ hero: name: "Nixopus" text: "Documentation" tagline: All the information you need to know about Nixopus - actions: - - theme: brand - text: Introduction - link: /introduction/index.md - - theme: alt - text: Installation - link: /install/index.md --- - - +
+
+
+ sudo bash -c "$(curl -sSL https://raw.githubusercontent.com/raghavyuva/nixopus/refs/heads/master/scripts/install.sh)" +
+ +
+
diff --git a/helpers/config.dev.yaml b/helpers/config.dev.yaml new file mode 100644 index 00000000..15beab1d --- /dev/null +++ b/helpers/config.dev.yaml @@ -0,0 +1,157 @@ +version: 1 +services: + api: + env: + PORT: ${API_PORT:-8080} + DB_NAME: ${DB_NAME:-postgres} + USERNAME: ${USERNAME:-postgres} + PASSWORD: ${PASSWORD:-changeme} + HOST_NAME: ${DB_HOST:-localhost} + DB_PORT: ${DB_PORT:-5432} + SSL_MODE: ${DB_SSL_MODE:-disable} + MOUNT_PATH: ${MOUNT_PATH:-./configs} + SSH_HOST: ${SSH_HOST:-localhost} + SSH_PORT: ${SSH_PORT:-22} + SSH_USER: ${SSH_USER:-$USER} + SSH_PRIVATE_KEY: ${SSH_PRIVATE_KEY:-~/.ssh/id_ed25519_nixopus} + SSH_PASSWORD: ${SSH_PASSWORD:-} + DOCKER_HOST: ${DOCKER_HOST:-unix:///var/run/docker.sock} + REDIS_URL: ${REDIS_URL:-redis://localhost:6379} + ALLOWED_ORIGIN: ${ALLOWED_ORIGIN:-http://localhost:7443} + ENV: ${ENV:-development} + LOGS_PATH: ${LOGS_PATH:-./logs} + API_VOLUME: ${API_VOLUME:-./configs} + DOCKER_PORT: ${DOCKER_PORT:-2376} + APP_VERSION: ${APP_VERSION:-0.1.0-alpha.11} + + view: + env: + PORT: ${VIEW_PORT:-7443} + WEBSOCKET_URL: ${WEBSOCKET_URL:-} + API_URL: ${API_URL:-} + WEBHOOK_URL: ${WEBHOOK_URL:-} + NEXT_PUBLIC_PORT: ${NEXT_PUBLIC_PORT:-7443} + LOGS_PATH: ${LOGS_PATH:-./logs} + + redis: + env: + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_VOLUME: ${REDIS_VOLUME:-./redis} + REDIS_IMAGE: ${REDIS_IMAGE:-redis:7-alpine} + REDIS_CONTAINER_NAME: ${REDIS_CONTAINER_NAME:-nixopus-redis-container} + + db: + env: + DB_PORT: ${DB_PORT:-5432} + DB_VOLUME: ${DB_VOLUME:-./db} + DB_IMAGE: ${DB_IMAGE:-postgres:14-alpine} + DB_CONTAINER_NAME: ${DB_CONTAINER_NAME:-nixopus-db-container} + POSTGRES_USER: ${USERNAME:-postgres} + POSTGRES_PASSWORD: ${PASSWORD:-changeme} + POSTGRES_DB: ${DB_NAME:-postgres} + POSTGRES_HOST_AUTH_METHOD: trust + +networks: + default: + name: nixopus-network + driver: bridge + +deps: + curl: + package: "curl" + command: "curl" + version: ">=7.80.0, <8.0.0" + version-command: ["curl", "--version"] + python3: + package: "python3" + command: "python3" + version: ">=3.8, <3.13" + version-command: ["python3", "--version"] + python3-venv: + package: "python3-venv" + command: "" + version: ">=3.8, <3.13" + version-command: ["python3", "--version"] + poetry: + package: "poetry" + command: "poetry" + version: ">=1.2.0, <2.0.0" + version-command: ["poetry", "--version"] + redis: + package: "redis-server" + command: "redis-server" + version: ">=7.0.0, <8.0.0" + version-command: ["redis-server", "--version"] + git: + package: "git" + command: "git" + version: ">=2.30.0, <3.0.0" + version-command: ["git", "--version"] + docker.io: + package: "docker.io" + command: "docker" + install_command: | + curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh + version: ">=20.10.0, <26.0.0" + version-command: ["docker", "--version"] + openssl: + package: "openssl" + command: "openssl" + version: ">=1.1.1, <4.0.0" + version-command: ["openssl", "version"] + open-ssh: + version: ">=8.0, <10.0" + version-command: ["ssh", "-V"] + openssh-client: + package: "openssh-client" + command: "ssh" + version-command: ["ssh", "-V"] + openssh-server: + package: "openssh-server" + command: "sshd" + version: ">=8.0, <10.0" + version-command: ["sshd", "-V"] + go: + package: "go" + command: "go" + version: ">=1.23.0, <2.0.0" + version-command: ["go", "version"] + node: + package: "node" + command: "node" + version: ">=18.0.0, <22.0.0" + version-command: ["node", "--version"] + npm: + package: "npm" + command: "npm" + version-command: ["npm", "--version"] + yarn: + package: 
"yarn" + command: "yarn" + version-command: ["yarn", "--version"] + python: + package: "python" + command: "python" + version-command: ["python", "--version"] + postgresql: + package: "postgresql" + command: "psql" + version-command: ["psql", "--version"] + air: + package: "air" + command: "air" + version: ">=1.40.0, <2.0.0" + version-command: ["air", "-v"] + +nixopus-config-dir: ./nixopus-dev +compose-file-path: docker-compose.yml +clone: + repo: "https://github.com/raghavyuva/nixopus" + branch: "feat/dev_environment" + source-path: . + +ports: [8080, 7443, 6379, 5432, 22] +ssh_key_size: 4096 +ssh_key_type: ed25519 +ssh_passphrase: +ssh_file_path: ~/.ssh/id_ed25519_nixopus diff --git a/helpers/config.json b/helpers/config.json deleted file mode 100644 index f80a630d..00000000 --- a/helpers/config.json +++ /dev/null @@ -1,269 +0,0 @@ -{ - "production": { - "config_dir": "/etc/nixopus", - "api_port": 8443, - "next_public_port": 7443, - "db_port": 5432, - "host_name": "nixopus-db", - "redis_url": "redis://nixopus-redis:6379", - "mount_path": "/etc/nixopus/configs", - "docker_host": "tcp://{ip}:2376", - "docker_port": 2376, - "docker": { - "required_version": "20.10.0", - "required_compose_version": "2.0.0", - "port": "2376", - "cert_path": "/etc/nixopus/docker-certs", - "context": "nixopus-production" - }, - "ssh": { - "port": 22, - "user": "root", - "key_bits": 4096, - "key_type": "rsa" - }, - "caddy": { - "endpoint": "http://nixopus-caddy:2019", - "data_volume": "/etc/nixopus/caddy/data", - "config_volume": "/etc/nixopus/caddy/config", - "config_path": "helpers/caddy.json", - "admin_port": "2019", - "reverse_proxy": { - "app": "nixopus-view:7443", - "api": "nixopus-api:8443" - } - }, - "database": { - "name_prefix": "nixopus_", - "user_prefix": "nixopus_", - "name_length": 8, - "user_length": 8, - "password_length": 16, - "ssl_mode": "disable" - }, - "version": { - "file_path": "version.txt" - }, - "urls": { - "api": { - "pattern": "{protocol}://{host}/api", - "protocols": { - "secure": "https", - "insecure": "http" - } - }, - "websocket": { - "pattern": "{protocol}://{host}/ws", - "protocols": { - "secure": "wss", - "insecure": "ws" - } - }, - "webhook": { - "pattern": "{protocol}://{host}/api/v1/webhook", - "protocols": { - "secure": "https", - "insecure": "http" - } - }, - "app": { - "pattern": "{protocol}://{host}", - "protocols": { - "secure": "https", - "insecure": "http" - } - } - }, - "directories": { - "ssh": "ssh", - "source": "source", - "api": "source/api", - "view": "source/view", - "db": "db", - "caddy": { - "data": "caddy/data", - "config": "caddy/config" - } - }, - "files": { - "env": ".env", - "env_sample": ".env.sample", - "permissions": { - "env": "600", - "private_key": "600", - "public_key": "644", - "authorized_keys": "600" - } - }, - "system": { - "supported_os": ["Linux"], - "required_tools": ["curl", "docker"] - }, - "api": { - "health_endpoint": "/api/v1/health", - "register_endpoint": "/api/v1/auth/register" - }, - "source": "/etc/nixopus/source", - "compose": { - "file": "docker-compose.yml" - }, - "containers": { - "nixopus-api-container": "API service", - "nixopus-db-container": "Database service", - "nixopus-view-container": "View service", - "nixopus-caddy-container": "Caddy service" - }, - "errors": { - "invalid_environment": "Invalid environment: {env}. 
Must be either 'production' or 'staging'", - "config_not_found": "Configuration file not found at {path}", - "invalid_json": "Invalid JSON in configuration file at {path}", - "env_not_found": "Configuration for environment '{env}' not found in config file", - "missing_keys": "Missing required configuration keys for environment '{env}': {keys}", - "invalid_type": "Invalid configuration value type in environment '{env}': {error}", - "invalid_url_type": "Invalid URL type: {type}", - "invalid_dir_type": "Invalid directory type: {type}", - "invalid_subdir_type": "Invalid sub-directory type: {type}", - "ssh_keygen_failed": "Failed to generate SSH key", - "ssh_keygen_not_found": "ssh-keygen not found: {error}", - "ssh_key_error": "Error generating SSH key: {error}", - "auth_keys_error": "Error setting up authorized_keys: {error}", - "file_read_error": "File read error: {error}", - "file_write_error": "File write error: {error}", - "invalid_domain": "Invalid domain format. Domains must be valid hostnames", - "setup_error": "Error setting up environment: {error}" - } - }, - "staging": { - "config_dir": "/etc/nixopus-staging", - "api_port": 8444, - "next_public_port": 7444, - "db_port": 5433, - "host_name": "nixopus-staging-db", - "redis_url": "redis://nixopus-staging-redis:6380", - "mount_path": "/etc/nixopus-staging/configs", - "docker_host": "tcp://{ip}:2377", - "docker_port": 2377, - "docker": { - "required_version": "20.10.0", - "required_compose_version": "2.0.0", - "port": "2377", - "cert_path": "/etc/nixopus-staging/docker-certs", - "context": "nixopus-staging" - }, - "ssh": { - "port": 22, - "user": "root", - "key_bits": 4096, - "key_type": "rsa" - }, - "caddy": { - "endpoint": "http://nixopus-caddy:2019", - "data_volume": "/etc/nixopus-staging/caddy/data", - "config_volume": "/etc/nixopus-staging/caddy/config", - "config_path": "helpers/caddy.json", - "admin_port": "2019", - "reverse_proxy": { - "app": "nixopus-staging-view:7444", - "api": "nixopus-staging-api:8444" - } - }, - "database": { - "name_prefix": "nixopus_", - "user_prefix": "nixopus_", - "name_length": 8, - "user_length": 8, - "password_length": 16, - "ssl_mode": "disable" - }, - "version": { - "file_path": "version.txt" - }, - "urls": { - "api": { - "pattern": "{protocol}://{host}/api", - "protocols": { - "secure": "https", - "insecure": "http" - } - }, - "websocket": { - "pattern": "{protocol}://{host}/ws", - "protocols": { - "secure": "wss", - "insecure": "ws" - } - }, - "webhook": { - "pattern": "{protocol}://{host}/api/v1/webhook", - "protocols": { - "secure": "https", - "insecure": "http" - } - }, - "app": { - "pattern": "{protocol}://{host}", - "protocols": { - "secure": "https", - "insecure": "http" - } - } - }, - "directories": { - "ssh": "ssh", - "source": "source", - "api": "source/api", - "view": "source/view", - "db": "db", - "caddy": { - "data": "caddy/data", - "config": "caddy/config" - } - }, - "files": { - "env": ".env", - "env_sample": ".env.sample", - "permissions": { - "env": "600", - "private_key": "600", - "public_key": "644", - "authorized_keys": "600" - } - }, - "system": { - "supported_os": ["Linux"], - "required_tools": ["curl", "docker"] - }, - "api": { - "health_endpoint": "/api/v1/health", - "register_endpoint": "/api/v1/auth/register" - }, - "source": "/etc/nixopus-staging/source", - "compose": { - "file": "docker-compose-staging.yml" - }, - "containers": { - "nixopus-staging-api": "API service", - "nixopus-staging-db": "Database service", - "nixopus-staging-view": "View service" - }, - "errors": { 
- "invalid_environment": "Invalid environment: {env}. Must be either 'production' or 'staging'", - "config_not_found": "Configuration file not found at {path}", - "invalid_json": "Invalid JSON in configuration file at {path}", - "env_not_found": "Configuration for environment '{env}' not found in config file", - "missing_keys": "Missing required configuration keys for environment '{env}': {keys}", - "invalid_type": "Invalid configuration value type in environment '{env}': {error}", - "invalid_url_type": "Invalid URL type: {type}", - "invalid_dir_type": "Invalid directory type: {type}", - "invalid_subdir_type": "Invalid sub-directory type: {type}", - "ssh_keygen_failed": "Failed to generate SSH key", - "ssh_keygen_not_found": "ssh-keygen not found: {error}", - "ssh_key_error": "Error generating SSH key: {error}", - "auth_keys_error": "Error setting up authorized_keys: {error}", - "file_read_error": "File read error: {error}", - "file_write_error": "File write error: {error}", - "invalid_domain": "Invalid domain format. Domains must be valid hostnames", - "setup_error": "Error setting up environment: {error}" - } - } -} \ No newline at end of file diff --git a/helpers/config.prod.yaml b/helpers/config.prod.yaml new file mode 100644 index 00000000..3c92ff25 --- /dev/null +++ b/helpers/config.prod.yaml @@ -0,0 +1,144 @@ +version: 1 +services: + api: + env: + PORT: ${API_PORT:-8443} + DB_NAME: ${DB_NAME:-postgres} + USERNAME: ${USERNAME:-postgres} + PASSWORD: ${PASSWORD:-changeme} + HOST_NAME: ${DB_HOST:-nixopus-db} + DB_PORT: ${DB_PORT:-5432} + SSL_MODE: ${DB_SSL_MODE:-disable} + MOUNT_PATH: ${MOUNT_PATH:-/etc/nixopus/configs} + SSH_HOST: ${SSH_HOST:-localhost} + SSH_PORT: ${SSH_PORT:-22} + SSH_USER: ${SSH_USER:-root} + SSH_PRIVATE_KEY: ${SSH_PRIVATE_KEY:-/etc/nixopus/ssh/id_rsa} + SSH_PASSWORD: ${SSH_PASSWORD:-} + DOCKER_HOST: ${DOCKER_HOST:-unix:///var/run/docker.sock} + REDIS_URL: ${REDIS_URL:-redis://nixopus-redis:6379} + CADDY_ENDPOINT: ${CADDY_ENDPOINT:-http://nixopus-caddy:2019} + ALLOWED_ORIGIN: ${ALLOWED_ORIGIN:-http://localhost:3000} + ENV: ${ENV:-production} + LOGS_PATH: ${LOGS_PATH:-./logs} + API_ENV_FILE: ${API_ENV_FILE:-/etc/nixopus/source/api/.env} + API_VOLUME: ${API_VOLUME:-/etc/nixopus/configs} + API_IMAGE: ${API_IMAGE:-ghcr.io/raghavyuva/nixopus-api:latest} + API_CONTAINER_NAME: ${API_CONTAINER_NAME:-nixopus-api-container} + DOCKER_PORT: ${DOCKER_PORT:-2376} + APP_VERSION: ${APP_VERSION:-0.1.0-alpha.11} + view: + env: + PORT: ${VIEW_PORT:-7443} + WEBSOCKET_URL: ${WEBSOCKET_URL:-} + API_URL: ${API_URL:-} + WEBHOOK_URL: ${WEBHOOK_URL:-} + NEXT_PUBLIC_PORT: ${NEXT_PUBLIC_PORT:-7443} + LOGS_PATH: ${LOGS_PATH:-./logs} + VIEW_ENV_FILE: ${VIEW_ENV_FILE:-/etc/nixopus/source/view/.env} + VIEW_IMAGE: ${VIEW_IMAGE:-ghcr.io/raghavyuva/nixopus-view:latest} + VIEW_CONTAINER_NAME: ${VIEW_CONTAINER_NAME:-nixopus-view-container} + + redis: + env: + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_VOLUME: ${REDIS_VOLUME:-/etc/nixopus/redis} + REDIS_IMAGE: ${REDIS_IMAGE:-redis:7-alpine} + REDIS_CONTAINER_NAME: ${REDIS_CONTAINER_NAME:-nixopus-redis-container} + + db: + env: + DB_PORT: ${DB_PORT:-5432} + DB_VOLUME: ${DB_VOLUME:-/etc/nixopus/db} + DB_IMAGE: ${DB_IMAGE:-postgres:14-alpine} + DB_CONTAINER_NAME: ${DB_CONTAINER_NAME:-nixopus-db-container} + POSTGRES_USER: ${USERNAME:-postgres} + POSTGRES_PASSWORD: ${PASSWORD:-changeme} + POSTGRES_DB: ${DB_NAME:-postgres} + POSTGRES_HOST_AUTH_METHOD: trust + + caddy: + env: + CADDY_IMAGE: ${CADDY_IMAGE:-caddy:latest} + CADDY_CONTAINER_NAME: 
${CADDY_CONTAINER_NAME:-nixopus-caddy-container} + CADDY_DATA_VOLUME: ${CADDY_DATA_VOLUME:-/etc/nixopus/caddy} + CADDY_CONFIG_VOLUME: ${CADDY_CONFIG_VOLUME:-/etc/nixopus/caddy} + CADDY_PORTS: "2019:2019,80:80,443:443" + API_DOMAIN: ${API_DOMAIN:-} + VIEW_DOMAIN: ${VIEW_DOMAIN:-} + BASE_URL: ${BASE_URL:-http://localhost:2019} + PROXY_PORT: ${PROXY_PORT:-2019} + CONFIG_ENDPOINT: ${CONFIG_ENDPOINT:-/config} + LOAD_ENDPOINT: ${LOAD_ENDPOINT:-/load} + STOP_ENDPOINT: ${STOP_ENDPOINT:-/stop} + CADDY_COMMAND: + [ + "caddy", + "run", + "--config", + "/etc/caddy/Caddyfile", + "--adapter", + "caddyfile", + ] + +networks: + default: + name: nixopus-network + driver: bridge + +deps: + python3: + package: "python3" + command: "python3" + version: ">=3.8, <3.13" + version-command: ["python3", "--version"] + python3-venv: + package: "python3-venv" + command: "" + version: ">=3.8, <3.13" + version-command: ["python3", "--version"] + git: + package: "git" + command: "git" + version: ">=2.30.0, <3.0.0" + version-command: ["git", "--version"] + curl: + package: "curl" + command: "curl" + version: ">=7.80.0, <8.0.0" + version-command: ["curl", "--version"] + docker.io: + package: "docker.io" + command: "docker" + install_command: | + curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh + version: ">=20.10.0, <26.0.0" + version-command: ["docker", "--version"] + openssl: + package: "openssl" + command: "openssl" + version: ">=1.1.1, <4.0.0" + version-command: ["openssl", "version"] + openssh-client: + package: "openssh-client" + command: "ssh" + version: ">=8.0, <10.0" + version-command: ["ssh", "-V"] + openssh-server: + package: "openssh-server" + command: "sshd" + version: ">=8.0, <10.0" + version-command: ["sshd", "-V"] + +nixopus-config-dir: /etc/nixopus +compose-file-path: source/docker-compose.yml +clone: + repo: "https://github.com/raghavyuva/nixopus" + branch: "master" + source-path: source + +ports: [2019, 80, 443, 7443, 8443, 6379, 5432] +ssh_key_size: 4096 +ssh_key_type: ed25519 +ssh_passphrase: +ssh_file_path: ssh/id_rsa diff --git a/installer/README.md b/installer/README.md deleted file mode 100644 index 29124c87..00000000 --- a/installer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Nixopus Installer - -This installer script helps you set up and run the Nixopus application on your local machine or server. \ No newline at end of file diff --git a/installer/__init__.py b/installer/__init__.py deleted file mode 100644 index 7202e07f..00000000 --- a/installer/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Installer package for Nixopus. 
-""" - -import logging - -def setup_logger(debug: bool = False): - logger = logging.getLogger("nixopus") - logger.setLevel(logging.DEBUG if debug else logging.INFO) - - # Create console handler with formatting - handler = logging.StreamHandler() - formatter = logging.Formatter('%(levelname)s: %(message)s') - handler.setFormatter(formatter) - logger.addHandler(handler) - - return logger - -# Create a default logger instance -logger = setup_logger() \ No newline at end of file diff --git a/installer/base_config.py b/installer/base_config.py deleted file mode 100644 index 7842e24b..00000000 --- a/installer/base_config.py +++ /dev/null @@ -1,53 +0,0 @@ -import json -from pathlib import Path -from dataclasses import dataclass -from typing import Dict, Any, TypeVar, Generic, Type -import inspect - -T = TypeVar('T') - -@dataclass -class BaseConfig(Generic[T]): - config_path: Path - env: str - required_keys: list[str] - valid_environments: list[str] - - def load_config(self) -> Dict[str, Any]: - try: - with open(self.config_path, 'r') as f: - return json.load(f) - except FileNotFoundError: - raise Exception(f"Configuration file not found at {self.config_path}") - except json.JSONDecodeError: - raise Exception(f"Invalid JSON in configuration file at {self.config_path}") - - def validate_environment(self) -> None: - if self.env not in self.valid_environments: - raise ValueError(f"Invalid environment: {self.env}. Must be one of {', '.join(self.valid_environments)}") - - def validate_config(self, config: Dict[str, Any]) -> None: - env_config = config.get(self.env) - if not env_config: - raise Exception(f"Configuration for environment '{self.env}' not found in config file") - - missing_keys = [key for key in self.required_keys if key not in env_config] - if missing_keys: - raise Exception(f"Missing required configuration keys for environment '{self.env}': {', '.join(missing_keys)}") - - def create(self, config_class: Type[T]) -> T: - self.validate_environment() - config = self.load_config() - self.validate_config(config) - - try: - env_config = config[self.env].copy() - if 'config_dir' in env_config: - env_config['config_dir'] = Path(env_config['config_dir']) - - config_fields = inspect.signature(config_class).parameters.keys() - filtered_config = {k: v for k, v in env_config.items() if k in config_fields} - - return config_class(**filtered_config) - except (ValueError, TypeError) as e: - raise Exception(f"Invalid configuration type for environment '{self.env}': {str(e)}") from e \ No newline at end of file diff --git a/installer/docker_setup.py b/installer/docker_setup.py deleted file mode 100644 index af84ef65..00000000 --- a/installer/docker_setup.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import subprocess -import json -import time -import shutil -from pathlib import Path -import socket -import requests - -class DockerSetup: - def __init__(self, env="staging", debug=False): - self.env = env - self.debug = debug - self.context_name = f"nixopus-{self.env}" - # store all the docker config in their respective docker-certs dir - self.config_dir = Path("/etc/nixopus-staging") if env == "staging" else Path("/etc/nixopus") - self.docker_certs_dir = self.config_dir / "docker-certs" - - def debug_print(self, message): - if self.debug: - print(f"[DEBUG] {message}") - - def get_public_ip(self): - try: - response = requests.get('https://api.ipify.org', timeout=10) - response.raise_for_status() # fail on non-2xx - return response.text.strip() - except requests.RequestException: - self.debug_print("Failed to 
get public IP, falling back to localhost") - return "localhost" - - def get_local_ip(self): - try: - response = socket.gethostbyname(socket.gethostname()) - return response - except socket.gaierror: - self.debug_print("Failed to get local IP, falling back to localhost") - return "localhost" - - def setup_docker_certs(self): - self.debug_print("Setting up Docker certificates...") - self.docker_certs_dir.mkdir(parents=True, exist_ok=True) - local_ip = self.get_local_ip() - - try: - self.debug_print("Generating CA key...") - result = subprocess.run([ - "openssl", "genrsa", "-out", str(self.docker_certs_dir / "ca-key.pem"), "4096" - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate CA key") - - self.debug_print("Generating CA certificate...") - result = subprocess.run([ - "openssl", "req", "-new", "-x509", "-days", "365", - "-key", str(self.docker_certs_dir / "ca-key.pem"), - "-sha256", "-out", str(self.docker_certs_dir / "ca.pem"), - "-subj", f"/CN={self.context_name}" - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate CA certificate") - - self.debug_print("Generating server key...") - result = subprocess.run([ - "openssl", "genrsa", "-out", str(self.docker_certs_dir / "server-key.pem"), "4096" - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate server key") - - with open(self.docker_certs_dir / "extfile.cnf", "w") as f: - f.write(f"subjectAltName = DNS:localhost,IP:{local_ip},IP:127.0.0.1\n") - - self.debug_print("Generating server CSR...") - result = subprocess.run([ - "openssl", "req", "-subj", f"/CN={local_ip}", "-new", - "-key", str(self.docker_certs_dir / "server-key.pem"), - "-out", str(self.docker_certs_dir / "server.csr") - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate server CSR") - - self.debug_print("Generating server certificate...") - result = subprocess.run([ - "openssl", "x509", "-req", "-days", "365", - "-in", str(self.docker_certs_dir / "server.csr"), - "-CA", str(self.docker_certs_dir / "ca.pem"), - "-CAkey", str(self.docker_certs_dir / "ca-key.pem"), - "-CAcreateserial", "-out", str(self.docker_certs_dir / "server-cert.pem"), - "-extfile", str(self.docker_certs_dir / "extfile.cnf") - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate server certificate") - - self.debug_print("Generating client key...") - result = subprocess.run([ - "openssl", "genrsa", "-out", str(self.docker_certs_dir / "key.pem"), "4096" - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate client key") - - self.debug_print("Generating client CSR...") - result = subprocess.run([ - "openssl", "req", "-subj", f"/CN={self.context_name}", "-new", - "-key", str(self.docker_certs_dir / "key.pem"), - "-out", str(self.docker_certs_dir / "client.csr") - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to generate client CSR") - - self.debug_print("Generating client certificate...") - result = subprocess.run([ - "openssl", "x509", "-req", "-days", "365", - "-in", str(self.docker_certs_dir / "client.csr"), - "-CA", str(self.docker_certs_dir / "ca.pem"), - "-CAkey", str(self.docker_certs_dir / "ca-key.pem"), - "-CAcreateserial", "-out", str(self.docker_certs_dir / "cert.pem") - ], capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed 
to generate client certificate") - - self.debug_print("Setting certificate permissions...") - for cert_file in self.docker_certs_dir.glob("*"): - cert_file.chmod(0o600) - - except Exception as e: - raise Exception(f"Error setting up Docker certificates: {str(e)}") - - def setup_docker_systemd_override(self): - self.debug_print("Setting up Docker systemd override...") - override_dir = Path("/etc/systemd/system/docker.service.d") - override_file = override_dir / "override.conf" - - try: - override_dir.mkdir(parents=True, exist_ok=True) - - override_content = """# Disable flags to dockerd, all settings are done in /etc/docker/daemon.json -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd""" - - override_file.write_text(override_content) - - self.debug_print("Reloading systemd daemon...") - subprocess.run(["systemctl", "daemon-reload"], check=True) - self.debug_print("Restarting Docker service...") - subprocess.run(["systemctl", "restart", "docker"], check=True) - - except Exception as e: - raise Exception(f"Failed to setup Docker systemd override: {str(e)}") - - def setup_docker_daemon_for_tcp(self): - self.debug_print("Setting up Docker daemon for TCP...") - docker_config_dir = Path("/etc/docker") - docker_config_dir.mkdir(parents=True, exist_ok=True) - - docker_port = 2376 if self.env == "production" else 2377 - - self.debug_print(f"Checking if port {docker_port} is available...") - try: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.bind(('0.0.0.0', docker_port)) - sock.close() - except socket.error: - raise Exception(f"Port {docker_port} is already in use") - - daemon_config = { - "hosts": [f"tcp://0.0.0.0:{docker_port}", "unix:///var/run/docker.sock"], - "tls": True, - "tlsverify": True, - "tlscacert": str(self.docker_certs_dir / "ca.pem"), - "tlscert": str(self.docker_certs_dir / "server-cert.pem"), - "tlskey": str(self.docker_certs_dir / "server-key.pem") - } - - daemon_json_path = docker_config_dir / "daemon.json" - - if daemon_json_path.exists(): - self.debug_print("Backing up existing daemon.json...") - backup_path = daemon_json_path.with_suffix('.json.bak') - shutil.copy2(daemon_json_path, backup_path) - - self.debug_print("Writing new daemon.json configuration...") - with open(daemon_json_path, "w") as f: - json.dump(daemon_config, f, indent=2) - - try: - self.debug_print("Stopping Docker service...") - subprocess.run(["systemctl", "stop", "docker"], - capture_output=True, text=True, check=True) - - time.sleep(2) - - self.debug_print("Starting Docker service...") - result = subprocess.run(["systemctl", "start", "docker"], - capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Failed to start Docker service") - - time.sleep(5) - - self.debug_print("Checking Docker service status...") - result = subprocess.run(["systemctl", "is-active", "docker"], - capture_output=True, text=True) - if result.returncode != 0: - if daemon_json_path.with_suffix('.json.bak').exists(): - self.debug_print("Restoring backup configuration...") - shutil.copy2(daemon_json_path.with_suffix('.json.bak'), daemon_json_path) - subprocess.run(["systemctl", "start", "docker"], - capture_output=True, text=True) - raise Exception("Docker service failed to start properly") - - except Exception as e: - result = subprocess.run(["journalctl", "-u", "docker", "-n", "50"], - capture_output=True, text=True) - error_logs = result.stdout if result.returncode == 0 else "Failed to get logs" - raise Exception(f"Failed to manage Docker service. 
Error: {str(e)}\nLogs: {error_logs}") - - def create_docker_context(self): - self.debug_print("Creating Docker context...") - docker_port = 2376 if self.env == "production" else 2377 - local_ip = self.get_local_ip() - - try: - self.debug_print("Checking Docker context support...") - result = subprocess.run(["docker", "context", "ls"], - capture_output=True, text=True) - if result.returncode != 0: - raise Exception("Docker not available or doesn't support contexts") - - self.debug_print(f"Removing existing context {self.context_name} if it exists...") - subprocess.run(["docker", "context", "rm", "-f", self.context_name], - capture_output=True, check=False) - - self.debug_print("Creating new Docker context...") - result = subprocess.run([ - "docker", "context", "create", self.context_name, - "--docker", f"host=tcp://{local_ip}:{docker_port}", - "--docker", f"ca={self.docker_certs_dir / 'ca.pem'}", - "--docker", f"cert={self.docker_certs_dir / 'cert.pem'}", - "--docker", f"key={self.docker_certs_dir / 'key.pem'}" - ], capture_output=True, text=True) - - if result.returncode != 0: - raise Exception(f"Failed to create Docker context: {result.stderr}") - - self.debug_print(f"Switching to context {self.context_name}...") - subprocess.run(["docker", "context", "use", self.context_name], - capture_output=True, text=True) - - self.debug_print("Verifying context switch...") - current_context = subprocess.run(["docker", "context", "ls", "--format", "{{.Name}} {{.Current}}"], - capture_output=True, text=True) - - if f"{self.context_name} true" not in current_context.stdout: - raise Exception(f"Failed to switch to context {self.context_name}. Current contexts:\n{current_context.stdout}") - - return self.context_name - - except Exception as e: - raise Exception(f"Error setting up Docker context: {str(e)}") - - def test_docker_context_output(self): - try: - local_ip = self.get_local_ip() - docker_port = 2376 if self.env == "production" else 2377 - - os.environ["DOCKER_HOST"] = f"tcp://{local_ip}:{docker_port}" - os.environ["DOCKER_TLS_VERIFY"] = "1" - os.environ["DOCKER_CERT_PATH"] = str(self.docker_certs_dir) - - self.debug_print("Checking Docker contexts...") - context_result = subprocess.run( - ["docker", "context", "ls"], - capture_output=True, - text=True - ) - - self.debug_print("Checking systemd status...") - systemd_result = subprocess.run( - ["systemctl", "status", "docker"], - capture_output=True, - text=True - ) - - self.debug_print("Checking Docker daemon status...") - daemon_result = subprocess.run( - ["docker", "info"], - capture_output=True, - text=True - ) - - if context_result.returncode != 0: - self.debug_print(f"Error listing Docker contexts:\n{context_result.stderr}") - return { - "status": "error", - "message": "Failed to list Docker contexts", - "error": context_result.stderr - } - - if daemon_result.returncode != 0: - self.debug_print(f"Error checking Docker daemon:\n{daemon_result.stderr}") - return { - "status": "error", - "message": "Docker daemon is not running", - "error": daemon_result.stderr - } - - self.debug_print("Docker contexts:") - self.debug_print(context_result.stdout) - self.debug_print("Docker daemon info:") - self.debug_print(daemon_result.stdout) - - return { - "status": "success", - "contexts": context_result.stdout, - "daemon_info": daemon_result.stdout - } - - except Exception as e: - self.debug_print(f"Error testing Docker context: {str(e)}") - return { - "status": "error", - "message": "Failed to test Docker context", - "error": str(e) - } - - def 
setup(self): - self.debug_print("Starting Docker setup...") - self.setup_docker_certs() - self.setup_docker_systemd_override() - self.setup_docker_daemon_for_tcp() - self.create_docker_context() - time.sleep(20) - return self.test_docker_context_output() \ No newline at end of file diff --git a/installer/environment.py b/installer/environment.py deleted file mode 100644 index a90a72f4..00000000 --- a/installer/environment.py +++ /dev/null @@ -1,355 +0,0 @@ -import os -import sys -import json -import secrets -import string -import logging -from pathlib import Path -import subprocess -from docker_setup import DockerSetup -from ssh_setup import SSHSetup, SSHConfig -from dataclasses import dataclass -from typing import Dict, Optional -from base_config import BaseConfig - -@dataclass -class URLConfig: - pattern: str - protocols: Dict[str, str] - -@dataclass -class DirectoryConfig: - ssh: str - source: str - api: str - view: str - db: str - caddy: Dict[str, str] - -@dataclass -class FileConfig: - env: str - env_sample: str - permissions: Dict[str, str] - -@dataclass -class ErrorConfig: - invalid_environment: str - config_not_found: str - invalid_json: str - env_not_found: str - missing_keys: str - invalid_type: str - invalid_url_type: str - invalid_dir_type: str - invalid_subdir_type: str - ssh_keygen_failed: str - ssh_keygen_not_found: str - ssh_key_error: str - auth_keys_error: str - file_read_error: str - file_write_error: str - invalid_domain: str - setup_error: str - -@dataclass -class EnvironmentConfig: - env: str - config_dir: Path - api_port: int - next_public_port: int - db_port: int - host_name: str - redis_url: str - mount_path: str - docker_host: str - docker_port: int - ssh_port: int - ssh_user: str - ssh_key_bits: int - ssh_key_type: str - caddy_endpoint: str - caddy_data_volume: str - caddy_config_volume: str - db_name_prefix: str - db_user_prefix: str - db_name_length: int - db_user_length: int - db_password_length: int - db_ssl_mode: str - version_file_path: str - urls: Dict[str, URLConfig] - directories: DirectoryConfig - files: FileConfig - errors: ErrorConfig - - def get_url(self, url_type: str, host: str, secure: bool = True) -> str: - if url_type not in self.urls: - raise ValueError(self.errors.invalid_url_type.format(type=url_type)) - - url_config = self.urls[url_type] - protocol = url_config.protocols["secure" if secure else "insecure"] - return url_config.pattern.format(protocol=protocol, host=host) - - def get_path(self, dir_type: str, sub_type: Optional[str] = None) -> Path: - if dir_type not in self.directories.__dict__: - raise ValueError(self.errors.invalid_dir_type.format(type=dir_type)) - - base_path = getattr(self.directories, dir_type) - if isinstance(base_path, dict) and sub_type: - if sub_type not in base_path: - raise ValueError(self.errors.invalid_subdir_type.format(type=sub_type)) - return self.config_dir / base_path[sub_type] - return self.config_dir / base_path - - def get_permission(self, file_type: str) -> int: - if file_type not in self.files.permissions: - raise ValueError(f"Invalid file type for permissions: {file_type}") - return int(self.files.permissions[file_type], 8) - - @classmethod - def create(cls, env: str) -> 'EnvironmentConfig': - VALID_ENVIRONMENTS = ["production", "staging"] - REQUIRED_CONFIG_KEYS = [ - "config_dir", "api_port", "next_public_port", "db_port", "host_name", - "redis_url", "mount_path", "docker_host", "docker_port", "ssh", - "caddy", "database", "version", "urls", "directories", "files", "errors" - ] - - config_path = 
Path(__file__).parent.parent / "helpers" / "config.json" - base_config = BaseConfig[EnvironmentConfig]( - config_path=config_path, - env=env, - required_keys=REQUIRED_CONFIG_KEYS, - valid_environments=VALID_ENVIRONMENTS - ) - - config = base_config.load_config() - env_config = config[env] - - try: - urls = { - url_type: URLConfig( - pattern=url_config["pattern"], - protocols=url_config["protocols"] - ) - for url_type, url_config in env_config["urls"].items() - } - - directories = DirectoryConfig( - ssh=env_config["directories"]["ssh"], - source=env_config["directories"]["source"], - api=env_config["directories"]["api"], - view=env_config["directories"]["view"], - db=env_config["directories"]["db"], - caddy=env_config["directories"]["caddy"] - ) - - files = FileConfig( - env=env_config["files"]["env"], - env_sample=env_config["files"]["env_sample"], - permissions=env_config["files"]["permissions"] - ) - - errors = ErrorConfig( - invalid_environment=env_config["errors"]["invalid_environment"], - config_not_found=env_config["errors"]["config_not_found"], - invalid_json=env_config["errors"]["invalid_json"], - env_not_found=env_config["errors"]["env_not_found"], - missing_keys=env_config["errors"]["missing_keys"], - invalid_type=env_config["errors"]["invalid_type"], - invalid_url_type=env_config["errors"]["invalid_url_type"], - invalid_dir_type=env_config["errors"]["invalid_dir_type"], - invalid_subdir_type=env_config["errors"]["invalid_subdir_type"], - ssh_keygen_failed=env_config["errors"]["ssh_keygen_failed"], - ssh_keygen_not_found=env_config["errors"]["ssh_keygen_not_found"], - ssh_key_error=env_config["errors"]["ssh_key_error"], - auth_keys_error=env_config["errors"]["auth_keys_error"], - file_read_error=env_config["errors"]["file_read_error"], - file_write_error=env_config["errors"]["file_write_error"], - invalid_domain=env_config["errors"]["invalid_domain"], - setup_error=env_config["errors"]["setup_error"] - ) - - return cls( - env=env, - config_dir=Path(env_config["config_dir"]), - api_port=int(env_config["api_port"]), - next_public_port=int(env_config["next_public_port"]), - db_port=int(env_config["db_port"]), - host_name=str(env_config["host_name"]), - redis_url=str(env_config["redis_url"]), - mount_path=str(env_config["mount_path"]), - docker_host=str(env_config["docker_host"]), - docker_port=int(env_config["docker_port"]), - ssh_port=int(env_config["ssh"]["port"]), - ssh_user=str(env_config["ssh"]["user"]), - ssh_key_bits=int(env_config["ssh"]["key_bits"]), - ssh_key_type=str(env_config["ssh"]["key_type"]), - caddy_endpoint=str(env_config["caddy"]["endpoint"]), - caddy_data_volume=str(env_config["caddy"]["data_volume"]), - caddy_config_volume=str(env_config["caddy"]["config_volume"]), - db_name_prefix=str(env_config["database"]["name_prefix"]), - db_user_prefix=str(env_config["database"]["user_prefix"]), - db_name_length=int(env_config["database"]["name_length"]), - db_user_length=int(env_config["database"]["user_length"]), - db_password_length=int(env_config["database"]["password_length"]), - db_ssl_mode=str(env_config["database"]["ssl_mode"]), - version_file_path=str(env_config["version"]["file_path"]), - urls=urls, - directories=directories, - files=files, - errors=errors - ) - except (ValueError, TypeError) as e: - raise Exception(env_config["errors"]["invalid_type"].format(env=env, error=str(e))) from e - -class EnvironmentSetup: - def __init__(self, domains: Optional[Dict[str, str]], env: str = "staging", debug: bool = False): - self.domains = domains - self.project_root = 
Path(__file__).parent.parent - self.config = EnvironmentConfig.create(env) - self.env_file = self.project_root / self.config.files.env - self.env_sample = self.project_root / self.config.files.env_sample - self.ssh_dir = self.config.get_path("ssh") - self.context_name = f"nixopus-{self.config.env}" - self.source_dir = self.config.get_path("source") - self.logger = logging.getLogger("nixopus") - if debug: - self.logger.setLevel(logging.DEBUG) - self.docker_setup = DockerSetup(env, debug) - self.ssh_setup = SSHSetup( - SSHConfig( - port=self.config.ssh_port, - user=self.config.ssh_user, - key_bits=self.config.ssh_key_bits, - key_type=self.config.ssh_key_type, - errors=self.config.errors - ), - self.ssh_dir - ) - - def generate_random_string(self, length=12): - if length <= 0: - raise ValueError("Length must be positive") - - alphabet = string.ascii_letters + string.digits - return ''.join(secrets.choice(alphabet) for _ in range(length)) - - def get_version(self): - self.logger.debug("Getting version from version file") - version_file = self.project_root / self.config.version_file_path - if version_file.exists(): - try: - with open(version_file, 'r') as f: - version = f.read().strip() - self.logger.debug(f"Version: {version}") - return version - except IOError as e: - self.logger.debug(f"Error reading version file: {e}") - raise Exception(self.config.errors.file_read_error.format(error=str(e))) - self.logger.debug("Version file not found, returning 'unknown'") - return "unknown" - - def setup_environment(self): - try: - self.logger.debug("Starting environment setup") - db_name = f"{self.config.db_name_prefix}{self.generate_random_string(self.config.db_name_length)}" - username = f"{self.config.db_user_prefix}{self.generate_random_string(self.config.db_user_length)}" - password = self.generate_random_string(self.config.db_password_length) - - self.logger.debug("Generating SSH keys") - private_key_path, public_key_path = self.ssh_setup.generate_key() - self.ssh_setup.setup_authorized_keys(public_key_path, self.config.files.permissions) - - self.logger.debug("Getting public IP") - local_ip = self.docker_setup.get_public_ip() - self.logger.debug(f"Public IP: {local_ip}") - - self.logger.debug("Setting up Docker context") - docker_context = self.docker_setup.setup() - - domain_not_provided = self.domains is None - if not domain_not_provided: - self.logger.debug("Validating domains") - if not all(isinstance(domain, str) and '.' 
in domain for domain in self.domains.values()): - raise ValueError(self.config.errors.invalid_domain) - - api_host = self.domains['api_domain'] if not domain_not_provided else f"{local_ip}:{self.config.api_port}" - app_host = self.domains['app_domain'] if not domain_not_provided else f"{local_ip}:{self.config.next_public_port}" - - self.logger.debug("Generating URLs") - api_url = self.config.get_url("api", api_host, not domain_not_provided) - websocket_url = self.config.get_url("websocket", api_host, not domain_not_provided) - webhook_url = self.config.get_url("webhook", api_host, not domain_not_provided) - allowed_origin = self.config.get_url("app", app_host, not domain_not_provided) - - self.logger.debug("Setting up environment variables") - base_env_vars = { - "DB_NAME": db_name, - "USERNAME": username, - "PASSWORD": password, - "HOST_NAME": self.config.host_name, - "DB_PORT": str(self.config.db_port), - "SSL_MODE": self.config.db_ssl_mode, - "API_PORT": str(self.config.api_port), - "API_URL": api_url, - "WEBSOCKET_URL": websocket_url, - "WEBHOOK_URL": webhook_url, - "NEXT_PUBLIC_PORT": str(self.config.next_public_port), - "MOUNT_PATH": self.config.mount_path, - "PORT": str(self.config.api_port), - "SSH_HOST": local_ip, - "SSH_PORT": str(self.config.ssh_port), - "SSH_USER": self.config.ssh_user, - "SSH_PRIVATE_KEY": str(private_key_path), - "DOCKER_HOST": self.config.docker_host.format(ip=local_ip), - "DOCKER_TLS_VERIFY": "1", - "DOCKER_CERT_PATH": str(self.docker_setup.docker_certs_dir), - "DOCKER_CONTEXT": docker_context, - "CADDY_ENDPOINT": self.config.caddy_endpoint, - "CADDY_DATA_VOLUME": self.config.get_path("caddy", "data"), - "CADDY_CONFIG_VOLUME": self.config.get_path("caddy", "config"), - "DB_VOLUME": self.config.get_path("db"), - "ALLOWED_ORIGIN": allowed_origin, - "APP_VERSION": self.get_version(), - "REDIS_URL": self.config.redis_url - } - - try: - self.logger.debug("Writing environment files") - with open(self.env_file, 'w') as f: - for key, value in base_env_vars.items(): - f.write(f"{key}={value}\n") - - api_env_vars = base_env_vars.copy() - api_env_vars["PORT"] = str(self.config.api_port) - api_env_file = self.config.get_path("api") / self.config.files.env - api_env_file.parent.mkdir(parents=True, exist_ok=True) - with open(api_env_file, 'w') as f: - for key, value in api_env_vars.items(): - f.write(f"{key}={value}\n") - - view_env_vars = base_env_vars.copy() - view_env_vars["PORT"] = str(self.config.next_public_port) - view_env_file = self.config.get_path("view") / self.config.files.env - view_env_file.parent.mkdir(parents=True, exist_ok=True) - with open(view_env_file, 'w') as f: - for key, value in view_env_vars.items(): - f.write(f"{key}={value}\n") - - self.logger.debug("Setting file permissions") - self.env_file.chmod(self.config.get_permission("env")) - private_key_path.chmod(self.config.get_permission("private_key")) - public_key_path.chmod(self.config.get_permission("public_key")) - self.logger.debug("Environment setup completed successfully") - return base_env_vars - except IOError as e: - self.logger.debug(f"Error writing environment files: {e}") - raise Exception(self.config.errors.file_write_error.format(error=str(e))) - except ValueError as e: - self.logger.debug(f"Validation error: {e}") - raise e - except Exception as e: - self.logger.debug(f"Setup error: {e}") - raise Exception(self.config.errors.setup_error.format(error=str(e))) \ No newline at end of file diff --git a/installer/input_parser.py b/installer/input_parser.py deleted file mode 100644 index 
50a4ad29..00000000 --- a/installer/input_parser.py +++ /dev/null @@ -1,84 +0,0 @@ -import argparse -import secrets -import string -import sys -import getpass -import logging - -from validation import Validation - - -class InputParser: - def __init__(self): - self.parser = self._setup_arg_parser() - self.validation = Validation() - self.logger = logging.getLogger("nixopus") - - def _setup_arg_parser(self): - parser = argparse.ArgumentParser(description='Nixopus Installation Wizard') - parser.add_argument('--api-domain', help='The domain where the nixopus api will be accessible (e.g. nixopusapi.example.com)') - parser.add_argument('--app-domain', help='The domain where the nixopus app will be accessible (e.g. nixopus.example.com)') - parser.add_argument('--email', '-e', help='The email to create the admin account with') - parser.add_argument('--password', '-p', help='The password to create the admin account with') - parser.add_argument('--env', choices=['production', 'staging'], default='production', help='The environment to install in (production or staging)') - parser.add_argument("--debug", action='store_true', help='Enable debug mode') - return parser - - def parse_args(self): - args = self.parser.parse_args() - if args.debug: - self.logger.setLevel(logging.DEBUG) - return args - - def generate_strong_password(self): - while True: - password = ''.join(secrets.choice( - string.ascii_letters + string.digits + self.validation.SPECIAL_CHARS - ) for _ in range(16)) - if (any(c.isupper() for c in password) and - any(c.islower() for c in password) and - any(c.isdigit() for c in password) and - any(c in self.validation.SPECIAL_CHARS for c in password)): - self.logger.debug(f"Generated password: {password}") - return password - - def get_env_from_args(self, args): - """ - Get the environment from the command line arguments - """ - if args.env: - if args.env not in ['production', 'staging']: - print("Error: Environment must be either 'production' or 'staging'") - sys.exit(1) - self.logger.debug(f"Using environment: {args.env}") - return args.env - else: - self.logger.debug("No environment specified, defaulting to production") - return "production" - - def get_domains_from_args(self, args): - if args.api_domain and args.app_domain: - try: - self.logger.debug(f"Validating domains - API: {args.api_domain}, App: {args.app_domain}") - self.validation.validate_domain(args.api_domain) - self.validation.validate_domain(args.app_domain) - return { - "api_domain": args.api_domain, - "app_domain": args.app_domain, - } - except SystemExit: - return None - self.logger.debug("No domains provided") - return None - - def get_admin_credentials_from_args(self, args): - # if email and password are provided, validate them and return them - if args.email and args.password: - self.logger.debug(f"Validating admin credentials for email: {args.email}") - self.validation.validate_email(args.email) - self.validation.validate_password(args.password) - return args.email, args.password - - # return None if only one of email or password is provided or if both are not provided - self.logger.debug("No admin credentials provided") - return None, None \ No newline at end of file diff --git a/installer/install.py b/installer/install.py deleted file mode 100644 index 3f4da929..00000000 --- a/installer/install.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python3 - -import time -import logging -from pathlib import Path -from environment import EnvironmentSetup -from input_parser import InputParser -from service_manager import 
ServiceManager -import sys - -class Installer: - def __init__(self): - self.required_docker_version = "20.10.0" - self.required_compose_version = "2.0.0" - self.project_root = Path(__file__).parent.parent - self.env_file = self.project_root / ".env" - self.env_sample = self.project_root / ".env.sample" - self.input_parser = InputParser() - self.service_manager = None # Will be initialized with environment later - self.logger = logging.getLogger("nixopus") - -def main(): - installer = Installer() - args = installer.input_parser.parse_args() - - print("\033[36m _ _ _ _ \033[0m") - print("\033[36m | \\ | (_) \033[0m") - print("\033[36m | \\| |___ _____ _ __ _ _ ___ \033[0m") - print("\033[36m | . ` | \\ \\/ / _ \\| '_ \\| | | / __|\033[0m") - print("\033[36m | |\\ | |> < (_) | |_) | |_| \\__ \033[0m") - print("\033[36m |_| \\_|_/_/\\_\\___/| .__/ \\__,_|___/\033[0m") - print("\033[36m | | \033[0m") - print("\033[36m |_| \033[0m") - print("\n") - print("\033[1mWelcome to Nixopus Installation Wizard\033[0m") - print("This wizard will guide you through the installation process of Nixopus.") - print("Please follow the prompts carefully to complete the setup.\n") - - installer.logger.debug("Starting installation process...") - - env = installer.input_parser.get_env_from_args(args) - domains = installer.input_parser.get_domains_from_args(args) - email, password = installer.input_parser.get_admin_credentials_from_args(args) - - installer.logger.debug("Initializing service manager...") - installer.service_manager = ServiceManager(installer.project_root, env, args.debug) - - installer.logger.debug("Checking system requirements...") - installer.service_manager.check_system_requirements() - - installer.logger.debug("Setting up environment...") - env_setup = EnvironmentSetup(domains, env, args.debug) - env_vars = env_setup.setup_environment() - - installer.logger.debug("Environment setup completed!") - - installer.logger.debug("Starting services...") - installer.service_manager.start_services(env) - - installer.logger.debug("Verifying installation...") - installer.service_manager.verify_installation(env) - - if domains is not None: - installer.logger.debug("Setting up Caddy reverse proxy...") - installer.service_manager.setup_caddy(domains, env) - - installer.logger.debug("Waiting for services to start...") - time.sleep(10) - - max_retries = 3 - retry_count = 0 - - if email is not None and password is not None: - installer.logger.debug("Setting up admin account...") - while retry_count < max_retries: - if installer.service_manager.check_api_up_status(env_vars["API_PORT"]): - installer.logger.debug("API is up, creating admin account...") - installer.service_manager.setup_admin(email, password, env_vars["API_PORT"]) - break - retry_count += 1 - if retry_count < max_retries: - installer.logger.debug(f"Retrying API status check (attempt {retry_count + 1}/{max_retries})...") - time.sleep(2) - - docker_setup = installer.service_manager.docker_setup - if domains and isinstance(domains, dict) and domains.get("app_domain"): - nixopus_accessible_at = domains["app_domain"] - installer.logger.debug(f"Using domain for access: {nixopus_accessible_at}") - else: - app_port = str(env_vars.get("APP_PORT", "")) - public_ip = docker_setup.get_public_ip() - nixopus_accessible_at = ( - public_ip - if app_port in {"80", "443"} - else f"{public_ip}:{app_port}" - ) - installer.logger.debug(f"Using IP and port for access: {nixopus_accessible_at}") - - print("\n\033[1mInstallation Complete!\033[0m") - print(f"• Nixopus is 
accessible at: {nixopus_accessible_at}") - print("\n\033[1mThank you for installing Nixopus!\033[0m") - print("\n\033[1mPlease visit the documentation at https://docs.nixopus.com for more information.\033[0m") - print("\n\033[1mIf you have any questions, please visit the community forum at https://discord.gg/skdcq39Wpv\033[0m") - print("\n\033[1mSee you in the community!\033[0m") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/installer/requirements.txt b/installer/requirements.txt deleted file mode 100644 index b317a696..00000000 --- a/installer/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -pathlib>=1.0.1 -requests>=2.28.1 \ No newline at end of file diff --git a/installer/service_config.py b/installer/service_config.py deleted file mode 100644 index 1b980ec0..00000000 --- a/installer/service_config.py +++ /dev/null @@ -1,18 +0,0 @@ -from dataclasses import dataclass -from typing import Dict, List -from pathlib import Path - -@dataclass -class ServiceConfig: - config_dir: Path - docker: Dict[str, str] - source: str - compose: Dict[str, str] - containers: Dict[str, str] - caddy: Dict[str, str] - api: Dict[str, str] - system: Dict[str, List[str]] - - def __post_init__(self): - if isinstance(self.config_dir, str): - self.config_dir = Path(self.config_dir) \ No newline at end of file diff --git a/installer/service_manager.py b/installer/service_manager.py deleted file mode 100644 index b1a6ed5b..00000000 --- a/installer/service_manager.py +++ /dev/null @@ -1,228 +0,0 @@ -import subprocess -import shutil -import json -import requests -import os -import sys -import platform -from pathlib import Path -from docker_setup import DockerSetup -from base_config import BaseConfig -from service_config import ServiceConfig -import time - -class ServiceManager: - def __init__(self, project_root, env="staging", debug=False): - self.project_root = project_root - self.debug = debug - self.docker_setup = DockerSetup(env, debug) - self.config = self._load_config(env) - - def debug_print(self, message): - if self.debug: - print(f"[DEBUG] {message}") - - def _load_config(self, env: str) -> ServiceConfig: - self.debug_print("Loading service configuration...") - config_path = Path(__file__).parent.parent / "helpers" / "config.json" - base_config = BaseConfig[ServiceConfig]( - config_path=config_path, - env=env, - required_keys=[ - "config_dir", "docker", "source", "compose", "containers", "caddy", "api", "system" - ], - valid_environments=["production", "staging"] - ) - return base_config.create(ServiceConfig) - - def check_system_requirements(self): - self.debug_print("Checking system requirements...") - - system = platform.system() - if system not in self.config.system['supported_os']: - print(f"Error: Unsupported operating system: {system}") - sys.exit(1) - - self.check_required_tools() - - self.debug_print("System requirements check passed!") - - def check_required_tools(self): - self.debug_print("Checking required tools...") - for tool in self.config.system['required_tools']: - if not shutil.which(tool): - print(f"Error: {tool} is not installed") - sys.exit(1) - - def start_services(self, env): - self.debug_print("Starting services...") - try: - try: - subprocess.run(["docker", "info"], check=True, capture_output=True) - except subprocess.CalledProcessError: - print("Error: Docker daemon is not running. 
Please start the Docker service and try again.") - sys.exit(1) - - os.environ["DOCKER_HOST"] = f"tcp://localhost:{self.config.docker['port']}" - os.environ["DOCKER_TLS_VERIFY"] = "1" - os.environ["DOCKER_CERT_PATH"] = self.config.docker['cert_path'] - os.environ["DOCKER_CONTEXT"] = self.config.docker['context'] - - compose_file = os.path.join(self.config.source, self.config.compose['file']) - - self.debug_print(f"Using Docker Compose file: {compose_file}") - if not os.path.exists(compose_file): - print(f"Error: Docker Compose file not found at {compose_file}") - sys.exit(1) - - compose_cmd = ["docker", "compose", "-f", compose_file] - - if env == "staging": - self.debug_print("Building and starting staging services...") - result = subprocess.run( - compose_cmd + ["up", "--build", "-d"], - capture_output=True, - text=True, - cwd=self.project_root - ) - if result.returncode != 0: - print("Error building and starting services:") - print(result.stderr) - raise Exception("Failed to build and start services") - else: - self.debug_print("Pulling production images...") - pull_result = subprocess.run( - compose_cmd + ["pull"], - capture_output=True, - text=True, - cwd=self.project_root - ) - if pull_result.returncode != 0: - print("Error pulling images:") - print(pull_result.stderr) - raise Exception("Failed to pull images") - - self.debug_print("Starting services...") - result = subprocess.run( - compose_cmd + ["up", "-d"], - capture_output=True, - text=True, - cwd=self.project_root - ) - if result.returncode != 0: - print("Error starting services:") - print(result.stderr) - raise Exception("Failed to start services") - except Exception as e: - print(f"Error starting services: {str(e)}") - sys.exit(1) - - def verify_installation(self, env): - self.debug_print("Verifying installation...") - try: - result = subprocess.run(["docker", "ps", "--format", "{{.Names}} {{.Status}}"], capture_output=True, text=True) - if result.returncode != 0: - print("Error verifying installation:") - print(result.stderr) - sys.exit(1) - - running_containers = result.stdout.splitlines() - required_containers = self.config.containers - - missing_containers = [] - for container, service_name in required_containers.items(): - container_running = any( - line.startswith(container) and "Up" in line - for line in running_containers - ) - if not container_running: - missing_containers.append(service_name) - - if missing_containers: - print("Error: The following services are not running:") - for service in missing_containers: - print(f" - {service}") - sys.exit(1) - - self.debug_print("All services are running successfully!") - except Exception as e: - print(f"Error verifying installation: {str(e)}") - sys.exit(1) - - def setup_caddy(self, domains, env): - self.debug_print("Setting up Proxy...") - try: - with open(self.project_root / self.config.caddy['config_path'], 'r') as f: - config_str = f.read() - config_str = config_str.replace('{env.APP_DOMAIN}', domains['app_domain']) - config_str = config_str.replace('{env.API_DOMAIN}', domains['api_domain']) - app_reverse_proxy_url = self.config.caddy["reverse_proxy"]["app"] - api_reverse_proxy_url = self.config.caddy["reverse_proxy"]["api"] - config_str = config_str.replace('{env.APP_REVERSE_PROXY_URL}', app_reverse_proxy_url) - config_str = config_str.replace('{env.API_REVERSE_PROXY_URL}', api_reverse_proxy_url) - new_config = json.loads(config_str) - self.debug_print("Loading Caddy configuration...") - response = requests.post( - 
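- # POST the placeholder-substituted Caddy JSON to the Caddy admin API's /load endpoint below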
f'http://localhost:{self.config.caddy["admin_port"]}/load', - json=new_config, - headers={'Content-Type': 'application/json'} - ) - if response.status_code != 200: - print("Failed to create server configuration:") - print(response.text) - raise Exception("Failed to create server configuration") - self.debug_print("Caddy configuration loaded successfully") - except requests.exceptions.RequestException as e: - print(f"Error connecting to Caddy: {str(e)}") - except Exception as e: - print(f"Error setting up Caddy: {str(e)}") - - def check_api_up_status(self, port): - self.debug_print(f"Checking API status on port {port}...") - try: - response = requests.get(f"http://localhost:{port}{self.config.api['health_endpoint']}", verify=False) - if response.status_code == 200: - self.debug_print("API is up and running") - return True - self.debug_print("API is not responding") - return False - except requests.exceptions.RequestException as e: - self.debug_print(f"Error checking API status: {str(e)}") - return False - - def setup_admin(self, email, password, port): - self.debug_print("Setting up admin...") - username = email.split('@')[0] - - try: - self.debug_print(f"Creating admin account for {email}...") - response = requests.post( - f"http://localhost:{port}{self.config.api['register_endpoint']}", - json={ - "email": email, - "password": password, - "type": "admin", - "username": username, - "organization": "" - }, - headers={"Content-Type": "application/json"} - ) - - if response.status_code == 200: - self.debug_print("Admin setup completed successfully") - return - - if response.status_code == 400 and "admin already registered" in response.text: - self.debug_print("Admin already registered") - return - - error_msg = response.json().get("message", "Unknown error") - print(f"API Error: {error_msg}") - raise Exception(f"API Error: {error_msg}") - - except requests.exceptions.RequestException as e: - print(f"Request failed: {str(e)}") - raise Exception(f"Failed to connect to API: {str(e)}") - except json.JSONDecodeError as e: - print(f"Invalid JSON response: {response.text}") - raise Exception(f"Invalid response from API: {str(e)}") diff --git a/installer/ssh_setup.py b/installer/ssh_setup.py deleted file mode 100644 index bd9eb5bd..00000000 --- a/installer/ssh_setup.py +++ /dev/null @@ -1,88 +0,0 @@ -import subprocess -import logging -from pathlib import Path -from typing import Tuple -from dataclasses import dataclass - -@dataclass -class SSHConfig: - port: int - user: str - key_bits: int - key_type: str - errors: dict - -class SSHSetup: - def __init__(self, config: SSHConfig, ssh_dir: Path): - self.config = config - self.ssh_dir = ssh_dir - self.logger = logging.getLogger("nixopus") - - def generate_key(self) -> Tuple[Path, Path]: - self.logger.debug(f"Generating SSH key in {self.ssh_dir}") - self.ssh_dir.mkdir(parents=True, exist_ok=True) - private_key_path = self.ssh_dir / f"id_{self.config.key_type}" - public_key_path = self.ssh_dir / f"id_{self.config.key_type}.pub" - - # (Re)generate when either part of the keypair is missing - if not private_key_path.exists() or not public_key_path.exists(): - self.logger.debug(f"Generating new key pair - Type: {self.config.key_type}, Bits: {self.config.key_bits}") - try: - subprocess.run( - [ - "ssh-keygen", - "-t", self.config.key_type, - "-b", str(self.config.key_bits), - "-f", str(private_key_path), - "-N", "" - ], - check=True - ) - self.logger.debug("SSH key pair generated successfully") - except FileNotFoundError as err: - 
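- # ssh-keygen binary is not available on PATH; report it via the configured "ssh_keygen_not_found" message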
self.logger.debug(f"ssh-keygen not found: {err}") - raise Exception( - self.config.errors["ssh_keygen_not_found"].format(error=str(err)) - ) from err - except subprocess.CalledProcessError as err: - self.logger.debug(f"ssh-keygen failed: {err}") - raise Exception( - self.config.errors["ssh_keygen_failed"] - ) from err - except Exception as err: - self.logger.debug(f"Error generating SSH key: {err}") - raise Exception( - self.config.errors["ssh_key_error"].format(error=str(err)) - ) from err - else: - self.logger.debug("Using existing SSH key pair") - return private_key_path, public_key_path - - def setup_authorized_keys(self, public_key_path: Path, permissions: dict) -> None: - self.logger.debug("Setting up authorized keys") - try: - ssh_config_dir = Path.home() / ".ssh" - ssh_config_dir.mkdir(mode=0o700, parents=True, exist_ok=True) - authorized_keys_path = ssh_config_dir / "authorized_keys" - - with open(public_key_path, 'r') as pk_file: - public_key_content = pk_file.read().strip() - - if authorized_keys_path.exists(): - self.logger.debug("Checking existing authorized_keys file") - with open(authorized_keys_path, 'r') as auth_file: - existing_content = auth_file.read() - if public_key_content in existing_content: - self.logger.debug("Public key already exists in authorized_keys") - return - - self.logger.debug("Adding public key to authorized_keys") - with open(authorized_keys_path, 'a+') as auth_file: - auth_file.write(f"\n{public_key_content}\n") - - authorized_keys_path.chmod(int(permissions["authorized_keys"], 8)) - self.logger.debug("Authorized keys setup completed") - - except Exception as e: - self.logger.debug(f"Error setting up authorized keys: {e}") - raise Exception(self.config.errors["auth_keys_error"].format(error=str(e))) \ No newline at end of file diff --git a/installer/update.py b/installer/update.py deleted file mode 100755 index 83889990..00000000 --- a/installer/update.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import subprocess -from pathlib import Path -import shutil -import json -import platform -import re - -class Updater: - def __init__(self): - self.project_root = Path(__file__).parent.parent - self.env_file = self.project_root / ".env" - self.required_docker_version = "20.10.0" - self.required_compose_version = "2.0.0" - - def ask_for_sudo(self): - if os.geteuid() != 0: - print("Please run the script with sudo privileges") - sys.exit(1) - - def check_docker_version(self): - try: - result = subprocess.run(["docker", "--version"], check=True, capture_output=True, text=True) - version_string = result.stdout.strip() - if not self._version_check(version_string, self.required_docker_version): - print(f"Error: Docker version {self.required_docker_version} or higher is required") - print(f"Current version: {version_string}") - sys.exit(1) - except subprocess.CalledProcessError as e: - print(f"Error: Docker is not installed or not working properly") - print(e.stderr.decode()) - sys.exit(1) - - def check_docker_compose_version(self): - try: - result = subprocess.run(["docker-compose", "--version"], check=True, capture_output=True, text=True) - version_string = result.stdout.strip() - if not self._version_check(version_string, self.required_compose_version): - print(f"Error: Docker Compose version {self.required_compose_version} or higher is required") - print(f"Current version: {version_string}") - sys.exit(1) - except subprocess.CalledProcessError as e: - print(f"Error: Docker Compose is not installed or not working properly") 
- print(e.stderr.decode()) - sys.exit(1) - - def check_curl_installed(self): - if not shutil.which("curl"): - print("Error: Curl is not installed") - sys.exit(1) - - def _version_check(self, version_string, required_version): - version = re.search(r'\d+\.\d+\.\d+', version_string) - if not version: - return False - return tuple(map(int, version.group().split('.'))) >= tuple(map(int, required_version.split('.'))) - - def check_system_requirements(self): - print("Checking system requirements...") - - system = platform.system() - if system not in ["Linux"]: - print(f"Error: Unsupported operating system: {system}") - sys.exit(1) - - self.check_docker_version() - self.check_docker_compose_version() - self.check_curl_installed() - - print("System requirements check passed!") - - def update_services(self): - print("\nUpdating services...") - try: - try: - subprocess.run(["docker", "info"], check=True, capture_output=True) - except subprocess.CalledProcessError: - print("Error: Docker daemon is not running. Please start the Docker service and try again.") - sys.exit(1) - - os.environ["DOCKER_HOST"] = "tcp://localhost:2376" - os.environ["DOCKER_TLS_VERIFY"] = "1" - os.environ["DOCKER_CERT_PATH"] = "/etc/nixopus/docker-certs" - - services = { - "nixopus-api-container": "nixopus-api:latest", - "nixopus-db-container": "nixopus-db:latest", - "nixopus-view-container": "nixopus-view:latest", - "nixopus-caddy-container": "nixopus-caddy:latest" - } - - for container_name, image_name in services.items(): - print(f"\nUpdating {container_name}...") - - inspect_result = subprocess.run( - ["docker", "inspect", container_name], - capture_output=True, - text=True - ) - - if inspect_result.returncode == 0: - container_config = json.loads(inspect_result.stdout)[0] - env_vars = container_config.get("Config", {}).get("Env", []) - volumes = container_config.get("HostConfig", {}).get("Binds", []) - ports = container_config.get("HostConfig", {}).get("PortBindings", {}) - networks = container_config.get("NetworkSettings", {}).get("Networks", {}) - - subprocess.run(["docker", "stop", container_name], capture_output=True) - subprocess.run(["docker", "rm", container_name], capture_output=True) - - pull_result = subprocess.run( - ["docker", "pull", image_name], - capture_output=True, - text=True - ) - - if pull_result.returncode != 0: - print(f"Error pulling image {image_name}:") - print(pull_result.stderr) - sys.exit(1) - - run_cmd = ["docker", "run", "-d", "--name", container_name] - - for env in env_vars: - run_cmd.extend(["-e", env]) - - for volume in volumes: - run_cmd.extend(["-v", volume]) - - for container_port, host_ports in ports.items(): - for host_port in host_ports: - run_cmd.extend(["-p", f"{host_port['HostPort']}:{container_port.split('/')[0]}"]) - - for network_name in networks.keys(): - run_cmd.extend(["--network", network_name]) - - run_cmd.append(image_name) - - result = subprocess.run( - run_cmd, - capture_output=True, - text=True - ) - - if result.returncode != 0: - print(f"Error starting container {container_name}:") - print(result.stderr) - sys.exit(1) - else: - pull_result = subprocess.run( - ["docker", "pull", image_name], - capture_output=True, - text=True - ) - - if pull_result.returncode != 0: - print(f"Error pulling image {image_name}:") - print(pull_result.stderr) - sys.exit(1) - - result = subprocess.run( - ["docker", "run", "-d", "--name", container_name, image_name], - capture_output=True, - text=True - ) - - if result.returncode != 0: - print(f"Error starting container {container_name}:") - 
print(result.stderr) - sys.exit(1) - - print("Services updated successfully!") - except Exception as e: - print(f"Error updating services: {str(e)}") - sys.exit(1) - - def verify_update(self): - print("\nVerifying update...") - try: - result = subprocess.run(["docker", "ps", "--format", "{{.Names}} {{.Status}}"], capture_output=True, text=True) - if result.returncode != 0: - print("Error verifying update:") - print(result.stderr) - sys.exit(1) - - running_containers = result.stdout.splitlines() - required_containers = { - "nixopus-api-container": "API service", - "nixopus-db-container": "Database service", - "nixopus-view-container": "View service", - "nixopus-caddy-container": "Caddy service" - } - - missing_containers = [] - for container, service_name in required_containers.items(): - container_running = any( - line.startswith(container) and "Up" in line - for line in running_containers - ) - if not container_running: - missing_containers.append(service_name) - - if missing_containers: - print("Error: The following services are not running:") - for service in missing_containers: - print(f" - {service}") - sys.exit(1) - - print("✓ All services are running successfully!") - except Exception as e: - print(f"Error verifying update: {str(e)}") - sys.exit(1) - - def setup_caddy(self): - print("\nSetting up Proxy...") - try: - with open('api/helpers/caddy.json', 'r') as f: - config = json.dumps(json.load(f)) - - result = subprocess.run( - ['curl', '-X', 'POST', 'http://localhost:2019/load', - '-H', 'Content-Type: application/json', - '-d', config], - capture_output=True, - text=True - ) - - if result.returncode == 0: - print("✓ Caddy configuration loaded successfully") - else: - print("✗ Failed to load Caddy configuration:") - print(result.stderr) - except Exception as e: - print(f"✗ Error setting up Caddy: {str(e)}") - -def main(): - updater = Updater() - - print("\033[36m _ _ _ _ \033[0m") - print("\033[36m | \ | (_) \033[0m") - print("\033[36m | \| |___ _____ _ __ _ _ ___ \033[0m") - print("\033[36m | . \` | \ \/ / _ \| '_ \| | | / __|\033[0m") - print("\033[36m | |\ | |> < (_) | |_) | |_| \__ \033[0m") - print("\033[36m |_| \_|_/_/\_\___/| .__/ \__,_|___/\033[0m") - print("\033[36m | | \033[0m") - print("\033[36m |_| \033[0m") - print("\n") - print("\033[1mWelcome to Nixopus Update Wizard\033[0m") - print("This wizard will guide you through the update process of Nixopus services.") - print("Please follow the prompts carefully to complete the update.\n") - - updater.ask_for_sudo() - updater.check_system_requirements() - updater.update_services() - updater.verify_update() - updater.setup_caddy() - - print("\n\033[1mUpdate Complete!\033[0m") - print("\n\033[1mYour Nixopus services have been successfully updated to the latest version.\033[0m") - print("\n\033[1mThank you for using Nixopus!\033[0m") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/installer/validation.py b/installer/validation.py deleted file mode 100644 index 23906154..00000000 --- a/installer/validation.py +++ /dev/null @@ -1,57 +0,0 @@ -import re -import sys -import socket -import string - -class Validation: - SPECIAL_CHARS = '!@#$%^&*()_+-=[]{}|;:,.<>?' 
-
-    def __init__(self):
-        pass
-
-    def validate_email(self, email):
-        if not email:
-            print("Error: Email is required")
-            sys.exit(1)
-        if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
-            print("Error: Invalid email address")
-            sys.exit(1)
-        return email
-
-    def validate_password(self, password):
-        if not password or len(password) < 8:
-            print(f"Error: Password must be at least 8 characters long")
-            sys.exit(1)
-
-        has_uppercase = any(char.isupper() for char in password)
-        has_lowercase = any(char.islower() for char in password)
-        has_digit = any(char.isdigit() for char in password)
-        has_special = any(char in self.SPECIAL_CHARS for char in password)
-
-        if not (has_uppercase and has_lowercase and has_digit and has_special):
-            print("Error: Password must contain at least one uppercase letter, one lowercase letter, one number, and one special character")
-            sys.exit(1)
-
-        return password
-
-    def validate_domain(self, domain):
-        if not domain:
-            print("Error: Domain is required")
-            sys.exit(1)
-        if not re.match(r"^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$", domain):
-            print("Error: Invalid domain name")
-            sys.exit(1)
-
-        try:
-            hostname = socket.gethostname()
-            server_ip = socket.gethostbyname(hostname)
-            domain_ip = socket.gethostbyname(domain)
-
-            if server_ip != domain_ip:
-                print(f"Warning: Domain {domain} does not point to this server's IP ({server_ip})")
-                print("Please ensure your DNS records are properly configured before proceeding.")
-        except socket.gaierror:
-            print(f"Warning: Could not resolve domain {domain}")
-            print("Please ensure your DNS records are properly configured before proceeding.")
-
-        return domain
diff --git a/package.json b/package.json
index 303c5d98..6e2fa528 100644
--- a/package.json
+++ b/package.json
@@ -2,5 +2,5 @@
   "name": "nixopus",
   "version": "0.1.0-alpha.11",
   "description": "A modern container management platform",
-  "private": true
-}
\ No newline at end of file
+  "private": false
+}
diff --git a/scripts/install-cli.sh b/scripts/install-cli.sh
new file mode 100755
index 00000000..e981f9bb
--- /dev/null
+++ b/scripts/install-cli.sh
@@ -0,0 +1,198 @@
+#!/bin/bash
+
+readonly RED='\033[0;31m'
+readonly NC='\033[0m'
+
+# GitHub repository info
+readonly REPO_URL="https://github.com/raghavyuva/nixopus"
+readonly PACKAGE_JSON_URL="$REPO_URL/raw/master/package.json"
+
+# Logging functions
+log_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
+
+# Detect system architecture
+detect_arch() {
+    local arch
+    arch=$(uname -m)
+    case "$arch" in
+        x86_64|amd64) echo "amd64" ;;
+        aarch64|arm64) echo "arm64" ;;
+        *) log_error "Unsupported architecture: $arch"; exit 1 ;;
+    esac
+}
+
+# Detect OS and package manager
+detect_os() {
+    case "$(uname -s)" in
+        Darwin*)
+            echo "tar" # macOS uses tar fallback
+            ;;
+        Linux*)
+            if command -v apt &>/dev/null; then
+                echo "deb"
+            elif command -v yum &>/dev/null || command -v dnf &>/dev/null; then
+                echo "rpm"
+            elif command -v apk &>/dev/null; then
+                echo "apk"
+            else
+                echo "tar"
+            fi
+            ;;
+        *)
+            echo "tar" # Default fallback
+            ;;
+    esac
+}
+
+# Get CLI version and package list
+get_package_info() {
+    local package_json
+    package_json=$(curl -fsSL "$PACKAGE_JSON_URL") || {
+        log_error "Failed to fetch package.json from repository"
+        exit 1
+    }
+
+    # Extract version and packages
+    CLI_VERSION=$(echo "$package_json" | grep -o '"cli-version":[[:space:]]*"[^"]*"' | cut -d'"' -f4)
+    CLI_PACKAGES=$(echo "$package_json" | grep -A 100 '"cli-packages"' | sed -n '/\[/,/\]/p' | grep -o '"[^"]*\..*"' | tr -d '"')
+
+    if [[ -z "$CLI_VERSION" ]]; then
+        log_error "Could not find cli-version in package.json"
+        exit 1
+    fi
+}
+
+# Build package name based on system
+build_package_name() {
+    local arch="$1"
+    local pkg_type="$2"
+
+    case "$pkg_type" in
+        deb) echo "nixopus_${CLI_VERSION}_${arch}.deb" ;;
+        rpm) echo "nixopus-${CLI_VERSION}-1.$([ "$arch" = "amd64" ] && echo "x86_64" || echo "aarch64").rpm" ;;
+        apk) echo "nixopus_${CLI_VERSION}_${arch}.apk" ;;
+        tar) echo "nixopus-${CLI_VERSION}.tar" ;;
+        *) log_error "Unknown package type: $pkg_type"; exit 1 ;;
+    esac
+}
+
+# Check if package exists in CLI packages list
+package_exists() {
+    local package_name="$1"
+    echo "$CLI_PACKAGES" | grep -q "^$package_name$"
+}
+
+# Download and install package
+install_package() {
+    local arch="$1"
+    local pkg_type="$2"
+    local package_name
+    local download_url
+    local temp_file
+
+    package_name=$(build_package_name "$arch" "$pkg_type")
+
+    if ! package_exists "$package_name"; then
+        log_error "Package $package_name not found in available packages"
+        echo "$CLI_PACKAGES"
+        exit 1
+    fi
+
+    download_url="$REPO_URL/releases/download/nixopus-$CLI_VERSION/$package_name"
+    temp_file="/tmp/$package_name"
+
+    curl -L -o "$temp_file" "$download_url" || {
+        log_error "Failed to download package"
+        exit 1
+    }
+
+    case "$pkg_type" in
+        deb)
+            sudo dpkg -i "$temp_file" || sudo apt-get install -f -y
+            ;;
+        rpm)
+            if command -v dnf &>/dev/null; then
+                sudo dnf install -y "$temp_file"
+            else
+                sudo yum install -y "$temp_file"
+            fi
+            ;;
+        apk)
+            sudo apk add --allow-untrusted "$temp_file"
+            ;;
+        tar)
+            tar -xf "$temp_file" -C /tmp
+
+            # Try to install without sudo first (for macOS with writable /usr/local/bin)
+            if [[ -w /usr/local/bin ]] || mkdir -p /usr/local/bin 2>/dev/null; then
+                cp /tmp/usr/local/bin/nixopus /usr/local/bin/
+                chmod +x /usr/local/bin/nixopus
+            else
+                # Fall back to sudo
+                sudo mkdir -p /usr/local/bin
+                sudo cp /tmp/usr/local/bin/nixopus /usr/local/bin/
+                sudo chmod +x /usr/local/bin/nixopus
+            fi
+
+            # On macOS, ensure /usr/local/bin is in PATH
+            if [[ "$(uname -s)" == "Darwin" ]]; then
+                if [[ ":$PATH:" != *":/usr/local/bin:"* ]]; then
+                    export PATH="/usr/local/bin:$PATH"
+                fi
+            fi
+            ;;
+    esac
+
+    # Cleanup
+    rm -f "$temp_file"
+}
+
+# Verify installation
+verify_installation() {
+    if ! command -v nixopus &>/dev/null; then
+        log_error "Installation verification failed. nixopus command not found."
+        exit 1
+    fi
+}
+
+# Main installation flow
+main() {
+    # Detect system
+    local arch pkg_type
+    arch=$(detect_arch)
+    pkg_type=$(detect_os)
+
+    # Get package information
+    get_package_info
+
+    # Install package
+    install_package "$arch" "$pkg_type"
+
+    # Verify installation
+    verify_installation
+}
+
+# Check if running as root for package managers that need it
+check_permissions() {
+    local pkg_type="$1"
+    case "$pkg_type" in
+        deb|rpm|apk)
+            if [[ $EUID -ne 0 ]] && ! sudo -n true 2>/dev/null; then
+                echo "This script requires sudo privileges for package installation."
+            fi
+            ;;
+        tar)
+            if [[ "$(uname -s)" != "Darwin" ]] || [[ ! -w /usr/local/bin ]]; then
+                if [[ $EUID -ne 0 ]] && ! sudo -n true 2>/dev/null; then
+                    echo "This script requires sudo privileges to install to /usr/local/bin."
+                fi
+            fi
+            ;;
+    esac
+}
+
+# Run main function with permission check
+pkg_type=$(detect_os)
+check_permissions "$pkg_type"
+main "$@"
\ No newline at end of file
diff --git a/scripts/install.sh b/scripts/install.sh
old mode 100755
new mode 100644
index 0d557d6f..68e62e57
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -1,122 +1,8 @@
 #!/bin/bash
-set -euo pipefail
+set -e
-# check if the script is running as root
-if [ "$EUID" -ne 0 ]; then
-    echo "Error: Please run as root (sudo)" >&2
-    exit 1
-fi
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-# check if the required commands are installed
-function check_command() {
-    local cmd="$1"
-    if ! command -v "$cmd" &>/dev/null; then
-        echo "Error: '$cmd' is not installed. Please install '$cmd' before running this script." >&2
-        exit 1
-    fi
-}
-
-# check if the required python version is installed
-function check_python_version() {
-    if ! python3 --version | grep -q "Python 3.10"; then
-        echo "Error: Python 3.10 is not installed. Please install Python 3.10 before running this script." >&2
-        exit 1
-    fi
-}
-
-# check if the required dependencies are installed
-function check_dependencies() {
-    check_command "python3"
-    check_command "pip3"
-    check_command "git"
-    # check_python_version
-}
-
-function parse_command_line_arguments() {
-    local env="production"
-    for arg in "$@"; do
-        case "$arg" in
-            --env=*)
-                env="${arg#*=}"
-                ;;
-        esac
-    done
-    echo "$env"
-}
-
-function setup_config_based_on_environment() {
-    local env="$1"
-    if [ "$env" == "staging" ]; then
-        NIXOPUS_DIR="/etc/nixopus-staging"
-        SOURCE_DIR="$NIXOPUS_DIR/source"
-        BRANCH="feat/develop"
-    else
-        NIXOPUS_DIR="/etc/nixopus"
-        SOURCE_DIR="$NIXOPUS_DIR/source"
-        BRANCH="master"
-    fi
-}
-
-function create_nixopus_directories() {
-    mkdir -p "${NIXOPUS_DIR:?}"
-    mkdir -p "${SOURCE_DIR:?}"
-}
-
-function clone_nixopus_repository() {
-    if [ -d "${SOURCE_DIR:?}/.git" ]; then
-        cd "${SOURCE_DIR:?}" || exit 1
-        git fetch --all > /dev/null 2>&1
-        git reset --hard "origin/${BRANCH:?}" > /dev/null 2>&1
-        git checkout "${BRANCH:?}" > /dev/null 2>&1
-        git pull > /dev/null 2>&1
-    else
-        rm -rf "${SOURCE_DIR:?}"/* "${SOURCE_DIR:?}"/.[!.]*
-        git clone https://github.com/raghavyuva/nixopus.git "${SOURCE_DIR:?}"
-        cd "${SOURCE_DIR:?}" || exit 1
-        git checkout "${BRANCH:?}" > /dev/null 2>&1
-    fi
-}
-
-function setup_caddy_configuration() {
-    rm -rf "${NIXOPUS_DIR:?}/caddy"
-    mkdir -p "${NIXOPUS_DIR:?}/caddy"
-    # todo : take the port from the config file instead of hardcoding it
-    echo '{
-    admin 0.0.0.0:2019
-    log {
-        format json
-        level INFO
-    }
-    }' > "${NIXOPUS_DIR:?}/caddy/Caddyfile"
-}
-
-function setup_nixopus_installation_environment() {
-    cd "${SOURCE_DIR:?}/installer" || exit 1
-    python3 -m venv venv
-    source venv/bin/activate
-    pip install --upgrade pip > /dev/null 2>&1
-    pip install -r requirements.txt > /dev/null 2>&1
-}
-
-function run_installer() {
-    PYTHONPATH="${SOURCE_DIR:?}/installer" python3 install.py "$@"
-}
-
-function deactivate_virtual_environment() {
-    deactivate
-}
-
-function main() {
-    check_dependencies
-    ENV=$(parse_command_line_arguments "$@")
-    setup_config_based_on_environment "$ENV"
-    create_nixopus_directories
-    clone_nixopus_repository
-    setup_caddy_configuration
-    setup_nixopus_installation_environment
-    run_installer "$@"
-    deactivate_virtual_environment
-}
-
-main "$@"
\ No newline at end of file
+bash "$SCRIPT_DIR/install-cli.sh"
+nixopus install
\ No newline at end of file
diff --git a/scripts/setup.sh b/scripts/setup.sh
deleted file mode
100755 index 12cf2c63..00000000 --- a/scripts/setup.sh +++ /dev/null @@ -1,967 +0,0 @@ -#!/usr/bin/env bash - -# Nixopus Development Environment Setup Script -# -# This script sets up the development environment for the project -# Supported platforms: Linux (Ubuntu, CentOS, Fedora, Arch) and macOS - -# Prerequisites: -# - Linux: Run with sudo privileges -# - macOS: Homebrew should be installed (https://brew.sh) -# Docker Desktop for Mac should be installed and running - -# Usage: -# - Linux: sudo ./setup.sh [OPTIONS] -# - macOS: ./setup.sh [OPTIONS] (no sudo required) -# -# Port Configuration: -# Use --help to see available port configuration options -# Example: ./setup.sh --api-port 8081 --view-port 3001 - -set -euo pipefail - - -BRANCH="feat/dev_environment" -OS="$(uname)" - -# Default port configurations -DEFAULT_API_PORT=8080 -DEFAULT_VIEW_PORT=7443 -DEFAULT_DB_PORT=5432 - -function detect_package_manager() { - if [[ "$OS" == "Darwin" ]]; then - if command -v brew &>/dev/null; then - echo "brew" - else - echo "Error: Homebrew not found. Please install Homebrew first: https://brew.sh" >&2 - exit 1 - fi - elif command -v apt-get &>/dev/null; then - echo "apt" - elif command -v dnf &>/dev/null; then - echo "dnf" - elif command -v yum &>/dev/null; then - echo "yum" - elif command -v pacman &>/dev/null; then - echo "pacman" - else - echo "Error: Unsupported package manager" >&2 - exit 1 - fi -} - -function install_package() { - local pkg_manager - pkg_manager=$(detect_package_manager) - - case $pkg_manager in - "brew") - brew install "$1" - ;; - "apt") - apt-get update - apt-get install -y "$1" - ;; - "dnf") - dnf install -y "$1" - ;; - "yum") - yum install -y "$1" - ;; - "pacman") - pacman -Sy --noconfirm "$1" - ;; - esac -} - -# check if the os is linux or macOS -function check_os() { - if [[ "$OS" != "Linux" && "$OS" != "Darwin" ]]; then - echo "Error: This script is only supported on Linux and macOS." >&2 - exit 1 - fi -} - -# check for Docker availability globally -function check_docker() { - if ! command -v docker &>/dev/null; then - if [[ "$OS" == "Darwin" ]]; then - echo "Error: Docker not found. Please ensure Docker Desktop for Mac is installed and running." - echo "Download from: https://www.docker.com/products/docker-desktop" - else - echo "Error: Docker not found. Please install Docker first." - echo "You can install it using your package manager or from: https://docs.docker.com/engine/install/" - fi - exit 1 - fi - - # Check if Docker daemon is running - if ! docker info &>/dev/null; then - echo "Error: Docker daemon is not running. Please start Docker service." - exit 1 - fi - - echo "Docker check completed." -} - -# check for prerequisites on macOS -function check_macos_prerequisites() { - if [[ "$OS" == "Darwin" ]]; then - echo "Checking macOS prerequisites" - - # Check for Homebrew - if ! command -v brew &>/dev/null; then - echo "Error: Homebrew is required on macOS but not found." >&2 - echo "Please install Homebrew first by running:" >&2 - echo '/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"' >&2 - exit 1 - fi - - echo "macOS prerequisites check completed." - fi -} - -# check if the script is running as root (only required for Linux) -function check_root() { - if [[ "$OS" == "Linux" && "$EUID" -ne 0 ]]; then - echo "Error: Please run as root (sudo) on Linux systems" >&2 - exit 1 - elif [[ "$OS" == "Darwin" && "$EUID" -eq 0 ]]; then - echo "Warning: consider running without sudo on macos as it is not required." 
>&2 - fi -} - -# check if the required commands are installed -function check_command() { - local cmd="$1" - if ! command -v "$cmd" &>/dev/null; then - echo "Command '$cmd' not found. Attempting to install" - case "$cmd" in - "git") - install_package "git" - ;; - "yarn") - if [[ "$OS" == "Darwin" ]]; then - install_package "node" - install_package "yarn" - else - install_package "nodejs" - install_package "npm" - npm install -g yarn - fi - ;; - *) - echo "Error: Automatic installation not supported for '$cmd'" >&2 - exit 1 - ;; - esac - fi -} - -function check_required_commands() { - local commands=("git" "yarn" "go") - for cmd in "${commands[@]}"; do - check_command "$cmd" - done -} - -# check if the go version is installed -function check_go_version() { - local go_version=$(go version | awk '{print $3}' | sed 's/^go//') - local required_version="1.23.4" - - local ver_num=$(echo "$go_version" | sed 's/\.//g') - local req_num=$(echo "$required_version" | sed 's/\.//g') - - if [ "$ver_num" -lt "$req_num" ]; then - echo "Error: Go version $required_version or higher is required. Current version: $go_version" >&2 - exit 1 - fi -} - -# clone the nixopus repository -function clone_nixopus() { - if [ -d "nixopus" ]; then - echo "nixopus directory already exists, please remove it manually and run the script again" - exit 1 - fi - if ! git clone --branch "$BRANCH" https://github.com/raghavyuva/nixopus.git; then - echo "Error: Failed to clone nixopus repository" >&2 - exit 1 - fi -} - -# checkout to the branch -function checkout_branch() { - local branch="$1" - if ! git checkout "$branch"; then - echo "Error: Failed to checkout to $branch" >&2 - exit 1 - fi -} - -# move to the folder -function move_to_folder() { - local folder="$1" - if ! cd "$folder"; then - echo "Error: Failed to change directory to $folder" >&2 - exit 1 - fi -} - -# get architecture name in format expected by most download URLs -function get_arch_name() { - local arch - arch=$(uname -m) - case "$arch" in - x86_64) echo "amd64" ;; - aarch64|arm64) echo "arm64" ;; - *) echo "Unsupported architecture: $arch" >&2; exit 1 ;; - esac -} - -# get OS name in lowercase format (for downloads, packages, etc.) -function get_os_name() { - case "$OS" in - "Linux") echo "linux" ;; - "Darwin") echo "darwin" ;; - *) echo "Unsupported OS: $OS" >&2; exit 1 ;; - esac -} - -function install_go() { - local version="1.23.4" - local arch - arch=$(get_arch_name) - - local os - os=$(get_os_name) - - local temp_dir - temp_dir=$(mktemp -d) - - echo "Downloading Go ${version}" - if ! 
curl -L "https://go.dev/dl/go${version}.${os}-${arch}.tar.gz" -o "${temp_dir}/go.tar.gz"; then - echo "Error: Failed to download Go" >&2 - rm -rf "$temp_dir" - exit 1 - fi - - echo "Verifying checksum" - local checksum_url="https://go.dev/dl/go${version}.${os}-${arch}.tar.gz.sha256" - local expected_sum - expected_sum=$(curl -sL "$checksum_url" | awk '{print $1}') - local actual_sum - if [[ "$OS" == "Darwin" ]]; then - actual_sum=$(shasum -a 256 "${temp_dir}/go.tar.gz" | awk '{print $1}') - else - actual_sum=$(sha256sum "${temp_dir}/go.tar.gz" | awk '{print $1}') - fi - - if [[ $expected_sum != $actual_sum ]]; then - echo "Error: Checksum mismatch for Go archive" >&2 - rm -rf "$temp_dir" - exit 1 - fi - - echo "Installing Go ${version}" - local go_install_path - if [[ "$OS" == "Darwin" ]]; then # incase of macOS - go_install_path="/usr/local" - if [[ "$EUID" -ne 0 ]]; then - echo "Installing Go to user directory" - go_install_path="$HOME" - fi - else - go_install_path="/usr/local" - fi - - if ! rm -rf "${go_install_path}/go" && tar -C "${go_install_path}" -xzf "${temp_dir}/go.tar.gz"; then - echo "Error: Failed to install Go" >&2 - rm -rf "$temp_dir" - exit 1 - fi - - rm -rf "$temp_dir" - - # Set up PATH - if [[ "$OS" == "Darwin" ]]; then - local shell_profile - if [[ "$SHELL" == *"zsh"* ]]; then - shell_profile="$HOME/.zshrc" - else - shell_profile="$HOME/.bash_profile" - fi - - if ! grep -q "${go_install_path}/go/bin" "$shell_profile" 2>/dev/null; then - echo "export PATH=\$PATH:${go_install_path}/go/bin" >> "$shell_profile" - echo "Added Go to PATH in $shell_profile" - fi - export PATH="$PATH:${go_install_path}/go/bin" - else - if ! grep -q "/usr/local/go/bin" /etc/profile.d/go.sh 2>/dev/null; then - echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile.d/go.sh - chmod +x /etc/profile.d/go.sh - source /etc/profile.d/go.sh - fi - fi -} - -# check if the go version is installed else install it 1.23.4 -function check_go_version() { - if ! command -v go &>/dev/null; then - echo "Go is not installed. Installing" - install_go - fi -} - -# install air hot reload for golang -function install_air_hot_reload(){ - local user_home - user_home=$(get_user_home) - local air_path="$user_home/go/bin/air" - - # Check if air is already installed - if command -v air &>/dev/null || [[ -f "$air_path" ]]; then - echo "Air hot reload is already installed, skipping installation" - export PATH="$PATH:$user_home/go/bin" - return 0 - fi - - echo "Installing Air hot reload" - sudo -u "${SUDO_USER:-$USER}" env GOPATH="$user_home/go" go install github.com/air-verse/air@latest - export PATH="$PATH:$user_home/go/bin" - - # Verify installation - if command -v air &>/dev/null || [[ -f "$air_path" ]]; then - echo "Air hot reload installed successfully" - else - echo "Warning: Air installation may have failed" - fi -} - -# load the env variables from the api/.env.sample file -function load_api_env_variables(){ - move_to_folder "api" - if [ -f .env.sample ]; then - while IFS='=' read -r key value; do - # Skip empty lines and comments - [[ -z "$key" || "$key" =~ ^# ]] && continue - # Remove any quotes from the value - value=$(echo "$value" | tr -d '"'"'") - # Export the variable - export "$key=$value" - done < .env.sample - else - echo "Error: .env.sample file not found in api directory" >&2 - exit 1 - fi - move_to_folder ".." 
-} - -# setup postgres with docker -function setup_postgres_with_docker(){ - load_api_env_variables - - # Check if container already exists - if docker ps -a --format 'table {{.Names}}' | grep -q "^nixopus-db$"; then - echo "Already nixopus-db container exists" - return 0 - fi - - # Start PostgreSQL container with credentials matching .env.sample - docker run -d --name nixopus-db \ - -e POSTGRES_USER="${USERNAME:-postgres}" \ - -e POSTGRES_PASSWORD="${PASSWORD:-12344}" \ - -e POSTGRES_DB="${DB_NAME:-postgres}" \ - -e POSTGRES_HOST_AUTH_METHOD=trust \ - -p "${DB_PORT:-5432}:5432" \ - --health-cmd="pg_isready -U ${USERNAME:-postgres} -d ${DB_NAME:-postgres}" \ - postgres:14-alpine - - echo "Waiting for PostgreSQL to be ready" - sleep 5 - - # Wait for PostgreSQL to be ready - local max_attempts=30 - local attempt=1 - while [ $attempt -le $max_attempts ]; do - if docker exec nixopus-db pg_isready -U "${USERNAME:-postgres}" -d "${DB_NAME:-postgres}" >/dev/null 2>&1; then - echo "PostgreSQL is ready!" - break - fi - echo "Waiting for PostgreSQL (attempt $attempt/$max_attempts)" - sleep 2 - attempt=$((attempt + 1)) - done - - if [ $attempt -gt $max_attempts ]; then - echo "Error: PostgreSQL failed to start within expected time" >&2 - exit 1 - fi - - echo "Postgres setup completed successfully" -} - -# verify database connection -function verify_database_connection(){ - echo "Verifying database connection" - load_api_env_variables - - # Test connection using docker exec - if docker exec nixopus-db psql -U "${USERNAME:-postgres}" -d "${DB_NAME:-postgres}" -c "SELECT 1;" >/dev/null 2>&1; then - echo "Database connection verified successfully" - else - echo "Error: Failed to connect to database" >&2 - echo "Details: user=${USERNAME:-postgres}, db=${DB_NAME:-postgres}, host=${HOST_NAME:-localhost}, port=${DB_PORT:-5432}" >&2 - exit 1 - fi -} - -# setup ssh will create a ssh key and add it to the authorized_keys file -function setup_ssh(){ - local user_home - user_home=$(get_user_home) - local ssh_dir="$user_home/.ssh" - local private_key="$ssh_dir/id_ed25519_nixopus" - local public_key="$ssh_dir/id_ed25519_nixopus.pub" - - if [[ -f "$private_key" && -f "$public_key" ]]; then - echo "SSH key for Nixopus already exists, skipping ssh setup" - return 0 - fi - - echo "setting up SSH config" - - # Check if ssh-keygen is available and install if needed - if ! command -v ssh-keygen &>/dev/null; then - echo "Installing openssh" - if [[ "$OS" == "Darwin" ]]; then - install_package "openssh" - else - case $(detect_package_manager) in - "apt") install_package "openssh-client" ;; - "dnf"|"yum") install_package "openssh-clients" ;; - "pacman") install_package "openssh" ;; - esac - fi - fi - - # Check SSH daemon availability on macOS - if [[ "$OS" == "Darwin" ]]; then - echo "Checking SSH daemon (Remote Login) status on macOS " - - # Check if SSH daemon is running - if ! sudo launchctl list | grep -q "com.openssh.sshd"; then - echo "" - echo "WARNING: SSH Remote Login is not enabled on this macOS system!" - echo "" - echo "To enable SSH Remote Login, please follow these steps:" - echo "1. Open System Settings (or System Preferences on older macOS versions)" - echo "2. Go to General → Sharing (or just Sharing on older versions)" - echo "3. Turn on 'Remote Login'" - echo "4. 
You can choose to allow access for:" - echo " - All users, or" - echo " - Only specific users (recommended for security)" - echo "" - echo "alternatively, you can enable it via command line by running:" - echo " sudo systemsetup -setremotelogin on" - echo "" - echo "after enabling Remote Login, please run this setup script again." - echo "" - read -p "press Enter to continue with SSH key generation (you'll still need to enable Remote Login) " - else - echo "SSH Remote Login is already enabled on macOS" - fi - fi - - local authorized_keys="$ssh_dir/authorized_keys" - - mkdir -p "$ssh_dir" && chmod 700 "$ssh_dir" - - echo "Generating Nixopus SSH key" - ssh-keygen -t ed25519 -f "$private_key" -N "" -C "nixopus-$(whoami)@$(hostname)-$(date +%Y%m%d)" - chmod 600 "$private_key" && chmod 644 "$public_key" - - if [[ ! -f "$authorized_keys" || ! $(grep -Fq "$(cat "$public_key")" "$authorized_keys" 2>/dev/null) ]]; then - cat "$public_key" >> "$authorized_keys" && chmod 600 "$authorized_keys" - echo "Nixopus public key added to authorized_keys" - fi - - echo "Nixopus SSH setup is done" -} - -# Function to update SSH configuration in environment files -function update_ssh_env_config(){ - local user_home - user_home=$(get_user_home) - local ssh_dir="$user_home/.ssh" - local private_key="$ssh_dir/id_ed25519_nixopus" - local current_user=$(whoami) - - echo "Updating SSH configuration in environment files " - - # Update API environment file - if [[ -f "api/.env" ]]; then - # Update SSH settings in API .env file - sed -i.bak "s|SSH_HOST=.*|SSH_HOST=localhost|g" api/.env - sed -i.bak "s|SSH_PORT=.*|SSH_PORT=22|g" api/.env - sed -i.bak "s|SSH_USER=.*|SSH_USER=$current_user|g" api/.env - - # For development environment, set up SSH private key (recommended) - # Users can optionally use SSH_PASSWORD for development if preferred - if grep -q "SSH_PRIVATE_KEY=" api/.env; then - sed -i.bak "s|SSH_PRIVATE_KEY=.*|SSH_PRIVATE_KEY=$private_key|g" api/.env - else - # Add SSH_PRIVATE_KEY if it doesn't exist - echo "SSH_PRIVATE_KEY=$private_key" >> api/.env - fi - - # Ensure SSH_PASSWORD exists as commented option for development - if ! 
grep -q "SSH_PASSWORD=" api/.env; then - echo "# SSH_PASSWORD=" >> api/.env - fi - - # Remove backup file - rm -f api/.env.bak - - echo "SSH configuration updated in api/.env" - echo " - SSH_HOST: localhost" - echo " - SSH_PORT: 22" - echo " - SSH_USER: $current_user" - echo " - SSH_PRIVATE_KEY: $private_key (recommended for production)" - echo " - SSH_PASSWORD: Available as commented option for development" - echo "" - echo "Note: For development, you can uncomment SSH_PASSWORD and comment out SSH_PRIVATE_KEY if preferred" - else - echo "Warning: api/.env file not found, SSH configuration not updated" - fi -} - -# Update environment files with custom port configurations -function update_port_configurations(){ - echo "Updating port configurations " - - # Update API environment file - if [[ -f "api/.env" ]]; then - sed -i.bak "s|PORT=.*|PORT=$API_PORT|g" api/.env - sed -i.bak "s|DB_PORT=.*|DB_PORT=$DB_PORT|g" api/.env - sed -i.bak "s|ALLOWED_ORIGIN=.*|ALLOWED_ORIGIN=http://localhost:$VIEW_PORT|g" api/.env - - rm -f api/.env.bak - echo "Updated API environment with custom ports" - fi - - # Update view environment file - if [[ -f "view/.env" ]]; then - sed -i.bak "s|PORT=.*|PORT=$VIEW_PORT|g" view/.env - sed -i.bak "s|NEXT_PUBLIC_PORT=.*|NEXT_PUBLIC_PORT=$VIEW_PORT|g" view/.env - - rm -f view/.env.bak - echo "Updated view environment with custom ports" - fi - - echo "Port configurations updated successfully" - echo " - API Port: $API_PORT" - echo " - Frontend Port: $VIEW_PORT" - echo " - Database Port: $DB_PORT" -} - -# setup environment variables -function setup_environment_variables(){ - move_to_folder "api" - if [ -f .env.sample ]; then - cp .env.sample .env || { echo "Error: Failed to copy api/.env.sample to .env" >&2; exit 1; } - else - echo "Error: api/.env.sample file not found" >&2 - exit 1 - fi - move_to_folder ".." - - move_to_folder "view" - if [ -f .env.sample ]; then - cp .env.sample .env || { echo "Error: Failed to copy view/.env.sample to .env" >&2; exit 1; } - else - echo "Error: view/.env.sample file not found" >&2 - exit 1 - fi - move_to_folder ".." - echo "Environment variables setup completed successfully" -} - -# start the api server -function start_api(){ - move_to_folder "api" - go mod tidy - go mod download - - local user_home - user_home=$(get_user_home) - - echo "API server started with air hot reload" - echo "Logs can be found in api.log" - echo "You can stop the server using 'pkill -f air' command" - nohup "$user_home/go/bin/air" > api.log 2>&1 & - -} - -open_discord_gh_link() { - - local url="https://discord.com/invite/skdcq39Wpv" - local gh_url="https://github.com/raghavyuva/nixopus/" - - case "$OS" in - Darwin) - open "$url" 2>/dev/null || echo "Could not open browser on macOS" - open "$gh_url" 2>/dev/null || echo "Could not open browser on macOS" - ;; - Linux) - if command -v xdg-open &>/dev/null; then - xdg-open "$url" 2>/dev/null || echo "Could not open Discord link" - xdg-open "$gh_url" 2>/dev/null || echo "Could not open GitHub link" - else - echo "Warning: Could not auto-open browser." >&2 - fi - ;; - *) - echo "Warning: Unsupported OS for browser launch." 
>&2 - ;; - esac -} - - -# start the view server -function start_view(){ - move_to_folder "view" - yarn install --frozen-lockfile - - # Read PORT from .env file - local view_port=7443 # default fallback - if [[ -f ".env" ]]; then - view_port=$(grep "^PORT=" .env | cut -d'=' -f2 | tr -d ' ') - view_port=${view_port:-7443} # fallback if empty - fi - - echo "View server started on port $view_port" - echo "Logs can be found in view.log" - echo "You can stop the server using 'pkill -f yarn' command" - nohup yarn run dev -- -p "$view_port" > view.log 2>&1 & - -} - -# Check if a port is available -function is_port_available() { - local port=$1 - local host=${2:-localhost} - - # Try to connect to the port, if it fails the port is available - ! nc -z "$host" "$port" 2>/dev/null -} - -# Validate that a port number is valid -function validate_port() { - local port=$1 - local port_name=$2 - - if ! [[ "$port" =~ ^[0-9]+$ ]] || [ "$port" -lt 1 ] || [ "$port" -gt 65535 ]; then - echo "Error: Invalid $port_name port '$port'. Port must be a number between 1 and 65535." - return 1 - fi - - if [ "$port" -lt 1024 ] && [ "$EUID" -ne 0 ]; then - echo "Warning: $port_name port $port is below 1024 and may require root privileges." - fi - - return 0 -} - -# Parse command line arguments for custom ports -function parse_arguments() { - while [[ $# -gt 0 ]]; do - case $1 in - --api-port) - API_PORT="$2" - shift 2 - ;; - --view-port) - VIEW_PORT="$2" - shift 2 - ;; - --db-port) - DB_PORT="$2" - shift 2 - ;; - --help|-h) - show_usage - exit 0 - ;; - *) - echo "Unknown option: $1" - show_usage - exit 1 - ;; - esac - done - - # Set defaults if not provided - API_PORT=${API_PORT:-$DEFAULT_API_PORT} - VIEW_PORT=${VIEW_PORT:-$DEFAULT_VIEW_PORT} - DB_PORT=${DB_PORT:-$DEFAULT_DB_PORT} - - # Validate all ports - validate_port "$API_PORT" "API" || exit 1 - validate_port "$VIEW_PORT" "View" || exit 1 - validate_port "$DB_PORT" "Database" || exit 1 -} - -# Show usage information -function show_usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Options:" - echo " --api-port PORT Set API server port (default: $DEFAULT_API_PORT)" - echo " --view-port PORT Set frontend server port (default: $DEFAULT_VIEW_PORT)" - echo " --db-port PORT Set database port (default: $DEFAULT_DB_PORT)" - echo " --help, -h Show this help message" - echo "" - echo "After setup completion, default admin credentials will be created:" - echo " Email: \$USER@example.com (where \$USER is your system username)" - echo " Password: Nixopus123!" - echo "" -} - -# Check availability of all required ports -function check_port_availability() { - echo "Checking port availability " - - # Check API port - if ! is_port_available "$API_PORT"; then - echo "Error: API port $API_PORT is already in use." - echo "Please use a different port: ./setup.sh --api-port " - exit 1 - fi - - # Check Frontend port - if ! is_port_available "$VIEW_PORT"; then - echo "Error: Frontend port $VIEW_PORT is already in use." - echo "Please use a different port: ./setup.sh --view-port " - exit 1 - fi - - # Check Database port - if ! is_port_available "$DB_PORT"; then - echo "Error: Database port $DB_PORT is already in use." - echo "Please use a different port: ./setup.sh --db-port " - exit 1 - fi - - echo "All required ports are available." 
- return 0 -} - -# Function to perform comprehensive SSH health checks -function ssh_health_check(){ - local user_home - user_home=$(get_user_home) - local ssh_dir="$user_home/.ssh" - local private_key="$ssh_dir/id_ed25519_nixopus" - local current_user=$(whoami) - - echo "Running SSH health check " - - # 1. Check SSH keys exist - if [[ ! -f "$private_key" || ! -f "$private_key.pub" ]]; then - echo "Error: SSH keys missing" - return 1 - fi - - # 2. Quick SSH connection test - if timeout 10 ssh -o ConnectTimeout=5 -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i "$private_key" "$current_user@localhost" "exit" &>/dev/null; then - echo "SSH connection successful" - return 0 - else - echo "SSH connection failed - please enable Remote Login in System Settings → Sharing (macOS) or start SSH service (Linux)" - return 1 - fi -} - -# Get the correct user home directory (handles sudo scenarios) -function get_user_home(){ - local user_home - if [[ "$OS" == "Darwin" ]]; then - user_home="$HOME" - if [[ "$EUID" -eq 0 && -n "${SUDO_USER:-}" ]]; then - user_home=$(eval echo "~$SUDO_USER") - fi - else - user_home=$(eval echo ~${SUDO_USER:-$USER}) - fi - echo "$user_home" -} - -function create_admin_credentials() { - echo "Creating default admin credentials " - - # Simple wait for API to be ready - local retries=3 - while [ $retries -gt 0 ]; do - if curl -s "http://localhost:$API_PORT/api/v1/health" >/dev/null 2>&1; then - break - fi - echo "Waiting for API server ($retries attempts left)" - sleep 2 - retries=$((retries - 1)) - done - - if [ $retries -eq 0 ]; then - echo "Warning: API server not responding, skipping admin creation" - return 1 - fi - - # Check if admin already exists - if curl -s "http://localhost:$API_PORT/api/v1/auth/is-admin-registered" | grep -q '"admin_registered":true'; then - echo "Admin already registered, skipping" - return 0 - fi - - # Create admin user - local username=${USER:-"admin"} - local email="${username}@example.com" - local password="Nixopus123!" - - echo "Creating admin: $email" - - if curl -s "http://localhost:$API_PORT/api/v1/auth/register" \ - -H 'content-type: application/json' \ - -d "{\"email\":\"$email\",\"password\":\"$password\",\"username\":\"$username\",\"type\":\"admin\"}" \ - | grep -q '"status":"success"'; then - - echo "Admin credentials created successfully!" - echo "Email: $email | Password: $password" - echo "Login at: http://localhost:$VIEW_PORT" - else - echo "Failed to create admin credentials" - echo "Manual command:" - echo "curl -v 'http://localhost:$API_PORT/api/v1/auth/register' -H 'content-type: application/json' -d '{\"email\":\"$email\",\"password\":\"$password\",\"username\":\"$username\",\"type\":\"admin\"}'" - fi -} - -# main function -function main() { - # Parse command line arguments first - parse_arguments "$@" - - echo "Starting Nixopus development environment setup" - check_os - check_macos_prerequisites - check_root - check_docker - check_required_commands - - # Check if ports are available before proceeding - if ! check_port_availability; then - echo "Setup cannot continue due to port conflicts. Please resolve them and try again." 
- exit 1 - fi - - check_go_version - clone_nixopus - move_to_folder "nixopus" - checkout_branch "$BRANCH" - install_air_hot_reload - echo "Nixopus repository cloned and configured successfully" - - setup_postgres_with_docker - verify_database_connection - setup_environment_variables - setup_ssh - update_ssh_env_config - update_port_configurations - - # Perform SSH health checks - echo "Running SSH health checks " - if ! ssh_health_check; then - if [[ "$OS" == "Darwin" ]]; then - echo "Enabling SSH Remote Login..." - sudo systemsetup -setremotelogin on &>/dev/null - else - echo "Starting SSH service..." - sudo systemctl start ssh &>/dev/null || sudo systemctl start sshd &>/dev/null - fi - - sleep 2 - if ssh_health_check; then - echo "SSH working!" - else - echo "SSH still not working, continuing setup..." - fi - fi - - echo "SSH setup completed successfully" - - start_api - echo "API server started successfully" - move_to_folder ".." - start_view - echo "View server started successfully" - - echo "Waiting for applications to fully initialize" - sleep 3 - - RED='\e[31m' - GREEN='\e[32m' - YELLOW='\e[33m' - BLUE='\e[34m' - MAGENTA='\e[35m' - CYAN='\e[36m' - RESET='\e[0m' - - printf "${CYAN} _ __ ____ _ __ ____ ____ __ __ _____${RESET}\n" - printf "${GREEN} / | / / / _/ | |/ / / __ \\\\ / __ \\\\ / / / / / ___/${RESET}\n" - printf "${YELLOW} / |/ / / / | / / / / / / /_/ / / / / / \\\\__ \\\\ ${RESET}\n" - printf "${BLUE} / /| / _/ / / | / /_/ / / ____/ / /_/ / ___/ / ${RESET}\n" - printf "${MAGENTA}/_/ |_/ /___/ /_/|_| \\\\____/ /_/ \\\\____/ /____/ ${RESET}\n" - printf "\n" - - - # Create default admin credentials - create_admin_credentials - echo "Nixopus development environment setup completed successfully" - echo "-------------------------------------------------------------" - echo "" - echo "=== Application Access ===" - echo "Frontend: http://localhost:$VIEW_PORT" - echo "API: http://localhost:$API_PORT" - echo "Database: localhost:$DB_PORT" - echo "" - echo "=== Default Login Credentials ===" - echo "Email: ${USER:-admin}@example.com" - echo "Password: Nixopus123!" - echo "Change these credentials after first login!" - echo "" - echo "=== Troubleshooting ===" - echo "If you encounter database connection issues:" - echo "1. Check if Docker container is running: docker ps | grep nixopus-db" - echo "2. Check database logs: docker logs nixopus-db" - echo "3. Verify connection: docker exec nixopus-db psql -U postgres -d postgres -c 'SELECT 1;'" - echo "4. Restart the database: docker restart nixopus-db" - echo "" - echo "To manually create admin credentials later:" - echo " Use the curl command shown above" - echo "" - echo "Log files:" - echo "- API logs: nixopus/api/api.log" - echo "- View logs: nixopus/view/view.log" - echo "----------------------------------------------------------------------------" - - echo "" - echo "Need help or have questions?" 
- echo ">>>> Join our Discord :: https://discord.com/invite/skdcq39Wpv" - echo ">>>> Star us on GitHub: https://github.com/raghavyuva/nixopus/" - echo ">>>> Raise issues on GitHub Issues: https://github.com/raghavyuva/nixopus/issues" - open_discord_gh_link - -} - -main "$@" diff --git a/view/app/containers/page.tsx b/view/app/containers/page.tsx index 2639cada..b5ba6087 100644 --- a/view/app/containers/page.tsx +++ b/view/app/containers/page.tsx @@ -14,6 +14,8 @@ import { Skeleton } from '@/components/ui/skeleton'; import DisabledFeature from '@/components/features/disabled-feature'; import { ResourceGuard, AnyPermissionGuard } from '@/components/rbac/PermissionGuard'; import useContainerList from './hooks/use-container-list'; +import { TypographyH1, TypographyMuted } from '@/components/ui/typography'; +import { useTranslation } from '@/hooks/use-translation'; interface ContainerActionsProps { container: any; @@ -78,21 +80,28 @@ interface ContainerInfoProps { } const ContainerInfo = ({ container }: ContainerInfoProps) => { + const { t } = useTranslation(); return ( -
+
Ports:
- {container?.ports?.map((port: any) => ( - - {port.public_port} → {port.private_port} - - ))} + {container?.ports?.length > 0 ? ( + container.ports.map((port: any) => ( + + {port.public_port} → {port.private_port} + + )) + ) : ( + {t("containers.no_ports_exposed")} + )}
Memory: - {(container.host_config.memory / (1024 * 1024)).toFixed(2)} MB + + {`${(container.host_config.memory / (1024 * 1024)).toFixed(2)} MB`} +
); @@ -114,28 +123,32 @@ const ContainerCard = ({ return (
- -
-
-

{container.name}

-

{container.image}

+ +
+
+

{container.name}

+

{container.image}

{container.status}
- +
+ +
+
+
+
-
); @@ -232,7 +245,10 @@ export default function ContainersPage() {
-

{t('containers.title')}

+ + {t('containers.title')} + {t('containers.description')} +