From 7743c67ba89e937b7ddf5b132fc879c4cd21ad4d Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Wed, 15 Nov 2023 15:16:17 +0100 Subject: [PATCH 1/7] Update readme for docker-compose v2 --- README.md | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index b678d9a38..42627c854 100644 --- a/README.md +++ b/README.md @@ -70,8 +70,7 @@ mkdir -p ignored/cratesfyi-prefix/crates.io-index # Builds the docs.rs binary SQLX_OFFLINE=1 cargo build # Start the external services. -# It may be `docker compose` in newer versions -docker-compose up -d db s3 +docker compose up -d db s3 # anything that doesn't run via docker-compose needs the settings defined in # .env. Either via `. ./.env` as below, or via any dotenv shell integration. . ./.env @@ -129,7 +128,7 @@ npm install browser-ui-test ### Pure docker-compose -If you have trouble with the above commands, consider using `docker-compose up --build`, +If you have trouble with the above commands, consider using `docker compose up --build`, which uses docker-compose for the web server as well. This will not cache dependencies - in particular, you'll have to rebuild all 400 whenever the lockfile changes - but makes sure that you're in a known environment so you should have fewer problems getting started. @@ -137,11 +136,18 @@ but makes sure that you're in a known environment so you should have fewer probl You can also use the `web` container to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): ```sh # run a build for a single crate -docker-compose run web build crate regex 1.3.1 +docker compose run web build crate regex 1.3.1 # or build essential files -docker-compose run web build add-essential-files +docker compose run web build add-essential-files # rebuild the web container when you changed code. -docker-compose build web +docker compose up -d web --build +``` + +You can also run other commands like the setup above from within the container: + +```sh +docker compose run --rm cli database migrate +docker compose run --rm cli build update-toolchain ``` Note that running tests is not supported when using pure docker-compose. @@ -163,14 +169,14 @@ Three services are defined: #### Rebuilding Containers -To rebuild the site, run `docker-compose build`. +To rebuild the site, run `docker compose build`. Note that docker-compose caches the build even if you change the source code, so this will be necessary anytime you make changes. If you want to completely clean up the database, don't forget to remove the volumes too: ```sh -$ docker-compose down --volumes +$ docker compose down --volumes ``` #### FAQ @@ -184,7 +190,7 @@ This is probably because you have `git.autocrlf` set to true, ##### I see the error `/opt/rustwide/cargo-home/bin/cargo: cannot execute binary file: Exec format error` when running builds. -You are most likely not on a Linux platform. Running builds directly is only supported on `x86_64-unknown-linux-gnu`. On other platforms you can use the `docker-compose run web build [...]` workaround described above. +You are most likely not on a Linux platform. Running builds directly is only supported on `x86_64-unknown-linux-gnu`. On other platforms you can use the `docker compose run web build [...]` workaround described above. See [rustwide#41](https://github.com/rust-lang/rustwide/issues/41) for more details about supporting more platforms directly. 
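For reference, the commands above assume the Compose v2 plugin that ships with current Docker releases; a quick way to check which variant is installed (a small sketch, assuming a standard Docker setup, not part of the original patch) is:

```sh
# Compose v2 plugin, invoked as a docker subcommand (used throughout this README)
docker compose version
# legacy standalone v1 binary, which these docs no longer rely on
docker-compose --version
```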
@@ -212,11 +218,11 @@ cargo run -- start-web-server ```sh # Builds and adds it into database # This is the main command to build and add a documentation into docs.rs. -# For example, `docker-compose run web build crate regex 1.1.6` +# For example, `docker compose run web build crate regex 1.1.6` cargo run -- build crate # alternatively, via the web container -docker-compose run web build crate +docker compose run web build crate # Builds every crate on crates.io and adds them into database # (beware: this may take months to finish) From c786404b98a31dc8908e5418215904a9a6451b2d Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Fri, 13 Oct 2023 23:13:12 +0200 Subject: [PATCH 2/7] Update CI to use docker-compose v2 --- dockerfiles/run-gui-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/run-gui-tests.sh b/dockerfiles/run-gui-tests.sh index 8e556b849..1f4420739 100755 --- a/dockerfiles/run-gui-tests.sh +++ b/dockerfiles/run-gui-tests.sh @@ -5,7 +5,7 @@ set -e # Just in case it's running, we stop the web server. docker compose stop web -docker compose up -d db s3 +docker compose up --wait --wait-timeout 30 db s3 # If we have a .env file, we need to temporarily move it so # it doesn't make sqlx fail compilation. From e1f80bde05fda78ac3b1d95a3b1cda2ae0a82e82 Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Sat, 21 Oct 2023 13:29:54 +0200 Subject: [PATCH 3/7] Don't bind internal docker-compose services to external IPs --- README.md | 6 +++--- docker-compose.yml | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 42627c854..894e7d56a 100644 --- a/README.md +++ b/README.md @@ -161,9 +161,9 @@ Three services are defined: | name | access | credentials | description | |------|-------------------------------------------------|----------------------------|----------------------------------------| -| web | http://localhost:3000 | N/A | A container running the docs.rs binary | -| db | postgresql://cratesfyi:password@localhost:15432 | - | Postgres database used by web | -| s3 | http://localhost:9000 | `cratesfyi` - `secret_key` | MinIO (simulates AWS S3) used by web | +| web | http://0.0.0.0:3000 | N/A | A container running the docs.rs binary | +| db | postgresql://cratesfyi:password@127.0.0.1:15432 | - | Postgres database used by web | +| s3 | http://127.0.0.1:9000 | `cratesfyi` - `secret_key` | MinIO (simulates AWS S3) used by web | [docker-compose.yml]: ./docker-compose.yml diff --git a/docker-compose.yml b/docker-compose.yml index 78ad3cbad..7ba45c05a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,7 +51,7 @@ services: POSTGRES_PASSWORD: password ports: # Use a non-standard port on the host to avoid conflicting with existing postgres servers - - "15432:5432" + - "127.0.0.1:15432:5432" healthcheck: test: ["CMD", "pg_isready", "--username", "cratesfyi"] interval: 10s @@ -66,8 +66,8 @@ services: minio server /data --console-address ":9001"; " ports: - - "9000:9000" - - "9001:9001" + - "127.0.0.1:9000:9000" + - "127.0.0.1:9001:9001" volumes: - minio-data:/data environment: @@ -91,7 +91,7 @@ services: context: ./dockerfiles dockerfile: ./Dockerfile-prometheus ports: - - "9090:9090" + - "127.0.0.1:9090:9090" healthcheck: test: ["CMD", "curl", "--silent", "--fail", "localhost:9090/-/ready"] From c73868f2e87b834ca955da5c734d9f23b04ac0dd Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Wed, 15 Nov 2023 15:53:34 +0100 Subject: [PATCH 4/7] Run separate web server, registry watcher and build servers in 
docker-compose --- .github/workflows/ci.yml | 35 ++------ .github/workflows/docker.yml | 6 +- .gitignore | 1 + README.md | 39 +++++--- docker-compose.yml | 166 +++++++++++++++++++++++++---------- dockerfiles/Dockerfile | 88 ++++++++++++++----- dockerfiles/entrypoint.sh | 38 -------- 7 files changed, 218 insertions(+), 155 deletions(-) delete mode 100755 dockerfiles/entrypoint.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4603d797f..70d3a8e7c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,13 +44,8 @@ jobs: - name: Launch postgres run: | - cp .env.sample .env - mkdir -p ${DOCSRS_PREFIX}/public-html - docker compose up -d db - # Give the database enough time to start up - sleep 5 - # Make sure the database is actually working - psql "${DOCSRS_DATABASE_URL}" + touch .docker.env + docker compose up --wait --wait-timeout 30 db - name: install SQLX CLI run: cargo install sqlx-cli --no-default-features --features postgres @@ -68,9 +63,6 @@ jobs: --database-url $DOCSRS_DATABASE_URL \ --target-version 0 - - name: Clean up the database - run: docker compose down --volumes - test: env: SQLX_OFFLINE: 1 @@ -88,13 +80,8 @@ jobs: - name: Launch postgres and min.io run: | - cp .env.sample .env - mkdir -p ${DOCSRS_PREFIX}/public-html - docker compose up -d db s3 - # Give the database enough time to start up - sleep 5 - # Make sure the database is actually working - psql "${DOCSRS_DATABASE_URL}" + touch .docker.env + docker compose up --wait --wait-timeout 30 db s3 - name: run workspace tests run: | @@ -106,9 +93,6 @@ jobs: run: | cargo test --locked -- --ignored --test-threads=1 - - name: Clean up the database - run: docker compose down --volumes - GUI_test: runs-on: ubuntu-latest steps: @@ -121,19 +105,12 @@ jobs: - name: Launch postgres and min.io run: | - cp .env.sample .env - mkdir -p ${DOCSRS_PREFIX}/public-html - docker compose up -d db s3 - # Give the database enough time to start up - sleep 5 - # Make sure the database is actually working - psql "${DOCSRS_DATABASE_URL}" + touch .docker.env + docker compose up --wait --wait-timeout 30 db s3 - name: Run GUI tests run: ./dockerfiles/run-gui-tests.sh - - name: Clean up the database - run: docker compose down --volumes fmt: name: Rustfmt diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 10c3c03b9..e81ed1f0b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -9,5 +9,7 @@ jobs: steps: - uses: actions/checkout@v5 - - name: Build the Docker image - run: docker build -t docs-rs -f dockerfiles/Dockerfile . + - run: docker build --target web-server -f dockerfiles/Dockerfile . + - run: docker build --target build-server -f dockerfiles/Dockerfile . + - run: docker build --target registry-watcher -f dockerfiles/Dockerfile . + - run: docker build --target cli -f dockerfiles/Dockerfile . diff --git a/.gitignore b/.gitignore index 99643b163..9d9ef06ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /ignored /.env +/.docker.env /src/web/badge/Cargo.lock target *.css diff --git a/README.md b/README.md index 894e7d56a..b78180783 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ mkdir -p ignored/cratesfyi-prefix/crates.io-index # Builds the docs.rs binary SQLX_OFFLINE=1 cargo build # Start the external services. -docker compose up -d db s3 +docker compose up --wait db s3 # anything that doesn't run via docker-compose needs the settings defined in # .env. Either via `. ./.env` as below, or via any dotenv shell integration. . 
./.env @@ -133,21 +133,32 @@ which uses docker-compose for the web server as well. This will not cache dependencies - in particular, you'll have to rebuild all 400 whenever the lockfile changes - but makes sure that you're in a known environment so you should have fewer problems getting started. -You can also use the `web` container to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): +You'll need to `touch .docker.env` first, this file can have any environment +variable overrides you want to use in docker containers. + +You can also use the `builder-a` container to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): + ```sh +# update the toolchain +docker compose run --rm builder-a build update-toolchain # run a build for a single crate -docker compose run web build crate regex 1.3.1 -# or build essential files -docker compose run web build add-essential-files -# rebuild the web container when you changed code. -docker compose up -d web --build +docker compose run --rm builder-a build crate regex 1.3.1 +# rebuild containers when you changed code. +docker compose up --wait --build ``` -You can also run other commands like the setup above from within the container: +You can also run other non-build commands like the setup steps above, or queueing crates for the background builders from within the `cli` container: ```sh docker compose run --rm cli database migrate -docker compose run --rm cli build update-toolchain +docker compose run --rm cli queue add regex 1.3.1 +``` + +If the command needs the crates.io-index clone then it must be run from within +a `registry-watcher` container: + +```sh +docker compose run --rm registry-watcher queue set-last-seen-reference --head ``` Note that running tests is not supported when using pure docker-compose. @@ -169,7 +180,7 @@ Three services are defined: #### Rebuilding Containers -To rebuild the site, run `docker compose build`. +To rebuild the site, run `docker compose --profile all build`. Note that docker-compose caches the build even if you change the source code, so this will be necessary anytime you make changes. @@ -190,7 +201,7 @@ This is probably because you have `git.autocrlf` set to true, ##### I see the error `/opt/rustwide/cargo-home/bin/cargo: cannot execute binary file: Exec format error` when running builds. -You are most likely not on a Linux platform. Running builds directly is only supported on `x86_64-unknown-linux-gnu`. On other platforms you can use the `docker compose run web build [...]` workaround described above. +You are most likely not on a Linux platform. Running builds directly is only supported on `x86_64-unknown-linux-gnu`. On other platforms you can use the `docker compose run --rm builder-a build [...]` workaround described above. See [rustwide#41](https://github.com/rust-lang/rustwide/issues/41) for more details about supporting more platforms directly. @@ -218,11 +229,11 @@ cargo run -- start-web-server ```sh # Builds and adds it into database # This is the main command to build and add a documentation into docs.rs. 
-# For example, `docker compose run web build crate regex 1.1.6` +# For example, `docker compose run --rm builder-a build crate regex 1.1.6` cargo run -- build crate -# alternatively, via the web container -docker compose run web build crate +# alternatively, within docker-compose containers +docker compose run --rm builder-a build crate # Builds every crate on crates.io and adds them into database # (beware: this may take months to finish) diff --git a/docker-compose.yml b/docker-compose.yml index 7ba45c05a..935f58048 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,44 +1,126 @@ -version: "3" +x-healthcheck: &healthcheck-interval + interval: 10s + timeout: 1s + start_interval: 1s + start_period: 10s + +x-environment: &environment + RUST_BACKTRACE: true + + DOCSRS_PREFIX: /opt/docsrs/prefix + DOCSRS_COMPILER_METRICS_PATH: /opt/docsrs/prefix/metrics + + DOCSRS_DATABASE_URL: postgresql://cratesfyi:password@db + DOCSRS_MIN_POOL_IDLE: 1 + DOCSRS_MIN_POOL_SIZE: 2 + DOCSRS_MAX_POOL_SIZE: 10 + DOCSRS_MAX_LEGACY_POOL_SIZE: 10 + + DOCSRS_STORAGE_BACKEND: s3 + + S3_ENDPOINT: http://s3:9000 + AWS_ACCESS_KEY_ID: cratesfyi + AWS_SECRET_ACCESS_KEY: secret_key + + DOCSRS_RENDER_THREADS: 2 + + DOCSRS_RUSTWIDE_WORKSPACE: /opt/docsrs/rustwide + DOCSRS_DOCKER: true + DOCSRS_DOCKER_IMAGE: ghcr.io/rust-lang/crates-build-env/linux-micro + DOCSRS_BUILD_CPU_LIMIT: 2 + DOCSRS_INCLUDE_DEFAULT_TARGETS: false + +x-build: &build + context: . + dockerfile: ./dockerfiles/Dockerfile + args: + PROFILE: dev + PROFILE_DIR: debug + +x-builder: &builder + build: + <<: *build + target: build-server + platform: "linux/amd64" + depends_on: + - db + - s3 + environment: *environment + env_file: + - .docker.env + healthcheck: + <<: *healthcheck-interval + test: curl --silent --fail localhost:3000/about/metrics + services: web: build: - context: . 
- dockerfile: ./dockerfiles/Dockerfile - args: - PROFILE: dev - PROFILE_DIR: debug + <<: *build + target: web-server platform: "linux/amd64" depends_on: - db - s3 ports: - - "3000:3000" - # for metrics - expose: ["3000"] + - "3000:80" + environment: *environment + env_file: + - .docker.env + healthcheck: + <<: *healthcheck-interval + test: curl --silent --fail localhost:80/about/metrics + + # Include the registry watcher with + # `docker compose --profile watch up --build --wait` + registry-watcher: + build: + <<: *build + target: registry-watcher + platform: "linux/amd64" + depends_on: + - db volumes: - - "/var/run/docker.sock:/var/run/docker.sock" - - ".rustwide-docker:/opt/docsrs/rustwide" - "cratesio-index:/opt/docsrs/prefix/crates.io-index" - - "./ignored/cratesfyi-prefix/metrics:/opt/docsrs/prefix/metrics" - "./static:/opt/docsrs/static:ro" - environment: - DOCSRS_RUSTWIDE_WORKSPACE: /opt/docsrs/rustwide - DOCSRS_COMPILER_METRICS_PATH: /opt/docsrs/prefix/metrics - DOCSRS_DATABASE_URL: postgresql://cratesfyi:password@db - DOCSRS_STORAGE_BACKEND: s3 - S3_ENDPOINT: http://s3:9000 - AWS_ACCESS_KEY_ID: cratesfyi - AWS_SECRET_ACCESS_KEY: secret_key - DOCSRS_MAX_LEGACY_POOL_SIZE: 10 - DOCSRS_MAX_POOL_SIZE: 10 - DOCSRS_MIN_POOL_IDLE: 1 + environment: *environment env_file: - - .env + - .docker.env + profiles: + - watch + - all healthcheck: - test: ["CMD", "curl", "--silent", "--fail", "localhost:3000"] - interval: 10s - timeout: 5s - retries: 10 + <<: *healthcheck-interval + test: curl --silent --fail localhost:3000/about/metrics + + builder-a: + <<: *builder + volumes: + - ".rustwide-docker/builder-a:/opt/docsrs/rustwide" + - "./ignored/cratesfyi-prefix/metrics:/opt/docsrs/prefix/metrics" + - "/var/run/docker.sock:/var/run/docker.sock" + + builder-b: + <<: *builder + volumes: + - ".rustwide-docker/builder-b:/opt/docsrs/rustwide" + - "./ignored/cratesfyi-prefix/metrics:/opt/docsrs/prefix/metrics" + - "/var/run/docker.sock:/var/run/docker.sock" + + cli: + build: + <<: *build + target: cli + platform: "linux/amd64" + depends_on: + - db + - s3 + volumes: + - "cratesio-index:/opt/docsrs/prefix/crates.io-index" + environment: *environment + env_file: + - .docker.env + profiles: + - all db: build: @@ -53,10 +135,8 @@ services: # Use a non-standard port on the host to avoid conflicting with existing postgres servers - "127.0.0.1:15432:5432" healthcheck: - test: ["CMD", "pg_isready", "--username", "cratesfyi"] - interval: 10s - timeout: 5s - retries: 10 + <<: *healthcheck-interval + test: pg_isready --username cratesfyi s3: image: minio/minio @@ -74,17 +154,8 @@ services: MINIO_ROOT_USER: cratesfyi MINIO_ROOT_PASSWORD: secret_key healthcheck: - test: - [ - "CMD", - "curl", - "--silent", - "--fail", - "localhost:9000/minio/health/ready", - ] - interval: 10s - timeout: 5s - retries: 10 + <<: *healthcheck-interval + test: mc ready local prometheus: build: @@ -93,11 +164,8 @@ services: ports: - "127.0.0.1:9090:9090" healthcheck: - test: - ["CMD", "curl", "--silent", "--fail", "localhost:9090/-/ready"] - interval: 10s - timeout: 5s - retries: 10 + <<: *healthcheck-interval + test: promtool check healthy gui_tests: build: @@ -108,6 +176,8 @@ services: - "host.docker.internal:host-gateway" volumes: - "${PWD}:/build/out" + profiles: + - all volumes: postgres-data: {} diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index a9b207da8..ecf272747 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -73,44 +73,84 @@ RUN apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get upgrade 
-y \ && DEBIAN_FRONTEND=noninteractive apt-get install -y \ ca-certificates \ + curl \ tini \ && rm -rf /var/lib/apt/lists/* +WORKDIR /srv/docsrs + +# Tini is a small init binary to properly handle signals +ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] +CMD ["start-web-server", "0.0.0.0:80"] + ARG PROFILE_DIR=release COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin COPY static /srv/docsrs/static COPY templates /srv/docsrs/templates COPY vendor /srv/docsrs/vendor -WORKDIR /srv/docsrs +######################## +# Build server stage # +######################## + +FROM ubuntu:22.04 AS build-server + +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + ca-certificates \ + tini \ + curl \ + docker.io \ + build-essential \ + gcc \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + # Tini is a small init binary to properly handle signals -CMD ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "start-web-server", "0.0.0.0:80"] +ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] +CMD ["start-build-server"] -################## -# Output stage # -################## +ARG PROFILE_DIR=release +COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin + +############################ +# Registry watcher stage # +############################ -FROM ubuntu:24.04 AS output +FROM ubuntu:22.04 AS registry-watcher -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ - git \ - libmagic1 \ - docker.io \ - ca-certificates \ - build-essential \ - gcc \ - pkg-config \ - libssl-dev +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + ca-certificates \ + tini \ + curl \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Tini is a small init binary to properly handle signals +ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] +CMD ["start-registry-watcher", "--repository-stats-updater=enabled", "--cdn-invalidator=enabled"] + +ARG PROFILE_DIR=release +COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin + +############### +# CLI stage # +############### + +FROM ubuntu:22.04 AS cli + +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + ca-certificates \ + tini \ + && rm -rf /var/lib/apt/lists/* -RUN mkdir -p /opt/docsrs/prefix +ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] ARG PROFILE_DIR=release COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin -COPY static /opt/docsrs/static -COPY templates /opt/docsrs/templates -COPY dockerfiles/entrypoint.sh /opt/docsrs/ -COPY vendor /opt/docsrs/vendor - -WORKDIR /opt/docsrs -ENTRYPOINT ["/opt/docsrs/entrypoint.sh"] -CMD ["daemon", "--registry-watcher=disabled"] diff --git a/dockerfiles/entrypoint.sh b/dockerfiles/entrypoint.sh deleted file mode 100755 index b8fdc61aa..000000000 --- a/dockerfiles/entrypoint.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -set -euv - -export DOCSRS_PREFIX=/opt/docsrs/prefix -export DOCSRS_DOCKER=true -export DOCSRS_LOG=${DOCSRS_LOG-"docs-rs,rustwide=info"} -export PATH="$PATH:/build/target/release" - -# Try migrating the database multiple times if it fails -# This avoids the docker container crashing the first time it's started with -# docker-compose, as PostgreSQL needs some time to initialize. -set +e -failed=0 -while true; do - if ! 
cratesfyi database migrate; then - ((failed=failed + 1)) - if [ "${failed}" -eq 5 ]; then - exit 1 - fi - echo "failed to migrate the database" - echo "waiting 1 second..." - sleep 1 - else - break - fi -done -set -e - -if ! [ -d "${DOCSRS_PREFIX}/crates.io-index/.git" ]; then - git clone ${REGISTRY_URL:-https://github.com/rust-lang/crates.io-index} "${DOCSRS_PREFIX}/crates.io-index" - # Prevent new crates built before the container creation to be built - git --git-dir="$DOCSRS_PREFIX/crates.io-index/.git" branch crates-index-diff_last-seen -fi - -cratesfyi build update-toolchain --only-first-time - -cratesfyi "$@" From 574ceb36de8647e8c26067292e6cf16aca0baf5f Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Tue, 19 Mar 2024 14:59:29 +0100 Subject: [PATCH 5/7] Ignore more files that are irrelevant to building a docker image --- .dockerignore | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index b96afe2a7..29b8ca6ad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,12 @@ +/target/ /.rustwide /.rustwide-docker +/Justfile +/LICENSE +/README.md +/docker-compose.yml +/docs/ /ignored -**/target +/mcps +/triagebot.toml +/clippy.toml From 7ba3b8ee30333ada100dbcd88a6fd7c4f6448ff9 Mon Sep 17 00:00:00 2001 From: Wim Looman Date: Sat, 17 May 2025 12:46:21 +0200 Subject: [PATCH 6/7] Better document initializing the docker-compose environment --- Justfile | 27 +++++++++++++++++++++++++++ README.md | 24 ++++++++++++++++++++---- 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/Justfile b/Justfile index a767a7979..17bb484d6 100644 --- a/Justfile +++ b/Justfile @@ -19,3 +19,30 @@ lint-fix: lint-js *args: deno run -A npm:eslint@9 static templates gui-tests eslint.config.js {{ args }} + +# Initialize the docker compose database +[group('compose')] +compose-migrate: + docker compose run --build --rm cli database migrate + +# Update last seen reference to the current index head, to only build newly published crates +[group('compose')] +compose-queue-head: + docker compose run --build --rm cli queue set-last-seen-reference --head + +# Launch base docker services, ensuring the database is migrated +[group('compose')] +compose-up: + just compose-migrate + docker compose up --build -d + +# Launch base docker services and registry watcher, ensuring the database is migrated +[group('compose')] +compose-up-watch: + just compose-migrate + docker compose --profile watch up --build -d + +# Shutdown docker services and cleanup all temporary volumes +[group('compose')] +compose-down: + docker compose --profile all down --volumes --remove-orphans diff --git a/README.md b/README.md index b78180783..881d5eabe 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,13 @@ This will not cache dependencies - in particular, you'll have to rebuild all 400 but makes sure that you're in a known environment so you should have fewer problems getting started. You'll need to `touch .docker.env` first, this file can have any environment -variable overrides you want to use in docker containers. +variable overrides you want to use in docker containers. 
Then run the migrations +before launching the main services: + +```sh +docker compose run --build --rm cli database migrate +docker compose up --build -d +``` You can also use the `builder-a` container to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): @@ -154,15 +160,25 @@ docker compose run --rm cli database migrate docker compose run --rm cli queue add regex 1.3.1 ``` -If the command needs the crates.io-index clone then it must be run from within -a `registry-watcher` container: +If you want to run the registry watcher, you'll need to first set the "last seen +reference" from the registry index, e.g. to set it to the current head so only +newly published crates are built: + +```sh +docker compose run --rm cli queue set-last-seen-reference --head +``` + +Then enable the docker-compose profile that includes the watcher: ```sh -docker compose run --rm registry-watcher queue set-last-seen-reference --head +docker compose --profile watch up --build -d ``` Note that running tests is not supported when using pure docker-compose. +Some of the above commands are included in the `Justfile` for ease of use, +check the `[compose]` group in `just --list`. + Please file bugs for any trouble you have running docs.rs! ### Docker-Compose From da1c3c2d1d083737b0c7035b3dbdd58a3fdd371b Mon Sep 17 00:00:00 2001 From: Denis Cornehl Date: Fri, 14 Nov 2025 17:35:44 +0100 Subject: [PATCH 7/7] extend&fix Dockerfile, extended compose.yml, new just recipes --- .docker.env.sample | 12 ++ .dockerignore | 10 +- .editorconfig | 4 + .env.sample | 31 ++-- .git_hooks/pre-commit | 17 --- .git_hooks/pre-push | 17 --- .github/workflows/ci.yml | 130 +++++++---------- .github/workflows/docker.yml | 42 +++++- .gitignore | 2 +- Justfile | 64 +++------ README.md | 76 +++++----- build.rs | 25 ++-- docker-compose.yml | 224 +++++++++++++++++++++++------ dockerfiles/Dockerfile | 132 +++++++++++------ dockerfiles/Dockerfile-gui-tests | 44 +++--- dockerfiles/prometheus.yml | 2 +- dockerfiles/run-gui-tests.sh | 44 ------ justfiles/cli.just | 97 +++++++++++++ justfiles/services.just | 64 +++++++++ justfiles/testing.just | 117 +++++++++++++++ justfiles/utils.just | 9 ++ src/bin/cratesfyi.rs | 24 ++-- src/build_queue.rs | 51 ++----- src/context.rs | 8 +- src/docbuilder/rustwide_builder.rs | 106 +++++++++++++- src/index.rs | 5 + src/lib.rs | 15 +- src/registry_api.rs | 8 +- src/repositories/github.rs | 8 +- src/repositories/gitlab.rs | 8 +- src/repositories/mod.rs | 6 - src/utils/daemon.rs | 8 +- src/utils/queue_builder.rs | 5 +- 33 files changed, 952 insertions(+), 463 deletions(-) create mode 100644 .docker.env.sample delete mode 100755 .git_hooks/pre-commit delete mode 100755 .git_hooks/pre-push delete mode 100755 dockerfiles/run-gui-tests.sh create mode 100644 justfiles/cli.just create mode 100644 justfiles/services.just create mode 100644 justfiles/testing.just create mode 100644 justfiles/utils.just diff --git a/.docker.env.sample b/.docker.env.sample new file mode 100644 index 000000000..743cc0613 --- /dev/null +++ b/.docker.env.sample @@ -0,0 +1,12 @@ +# environment overrides for the app instances running inside docker compose +DOCSRS_LOG=docs_rs=debug,rustwide=info +DOCSRS_INCLUDE_DEFAULT_TARGETS=false +DOCSRS_DOCKER_IMAGE=ghcr.io/rust-lang/crates-build-env/linux-micro +# To build with a PR that hasn't landed in a rust dist toolchain yet, +# you can set this to the git sha of a try build: +# https://forge.rust-lang.org/infra/docs/rustc-ci.html#try-builds +DOCSRS_TOOLCHAIN=nightly + 
+# for the registry watcher, automatically queued rebuilds.
+DOCSRS_MAX_QUEUED_REBUILDS=10
+
diff --git a/.dockerignore b/.dockerignore
index 29b8ca6ad..1f8c1f086 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,12 +1,18 @@
-/target/
+/.docker.env
+/.env.*
+/.envrc
 /.rustwide
 /.rustwide-docker
 /Justfile
 /LICENSE
 /README.md
+/clippy.toml
 /docker-compose.yml
+/dockerfiles/
 /docs/
 /ignored
+/justfiles/
 /mcps
+/target/
 /triagebot.toml
-/clippy.toml
+archive_cache
diff --git a/.editorconfig b/.editorconfig
index da7d8221e..d9432f1f2 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -8,5 +8,9 @@ insert_final_newline = true
 indent_style = space
 indent_size = 4
+[Justfile]
+indent_size = 2
+
 [*.js]
 max_line_length = 100
+
diff --git a/.env.sample b/.env.sample
index a6e3ae856..d9c9d3e72 100644
--- a/.env.sample
+++ b/.env.sample
@@ -1,18 +1,25 @@
-export DOCSRS_PREFIX=ignored/cratesfyi-prefix
-export DOCSRS_DATABASE_URL=postgresql://cratesfyi:password@localhost:15432
 # for local development with sqlx
-export DATABASE_URL=postgresql://cratesfyi:password@localhost:15432
+# environment overrides for local development outside docker.
+DOCSRS_PREFIX=ignored/cratesfyi-prefix
+DOCSRS_DATABASE_URL=postgresql://cratesfyi:password@localhost:15432
 # for local development with sqlx
+DATABASE_URL=postgresql://cratesfyi:password@localhost:15432
-export DOCSRS_LOG=docs_rs=debug,rustwide=info
+DOCSRS_LOG=docs_rs=debug,rustwide=info
+AWS_ACCESS_KEY_ID=cratesfyi
+AWS_SECRET_ACCESS_KEY=secret_key
+S3_ENDPOINT=http://localhost:9000
+DOCSRS_S3_STATIC_ROOT_PATH=http://localhost:9000/rust-docs-rs
+
+
+# build specific
+DOCSRS_INCLUDE_DEFAULT_TARGETS=false
+DOCSRS_DOCKER_IMAGE=ghcr.io/rust-lang/crates-build-env/linux-micro
+SENTRY_ENVIRONMENT=dev
 # To build with a PR that hasn't landed in a rust dist toolchain yet,
 # you can set this to the git sha of a try build:
 # https://forge.rust-lang.org/infra/docs/rustc-ci.html#try-builds
-export DOCSRS_TOOLCHAIN=nightly
-export AWS_ACCESS_KEY_ID=cratesfyi
-export AWS_SECRET_ACCESS_KEY=secret_key
-export S3_ENDPOINT=http://localhost:9000
-export DOCSRS_S3_STATIC_ROOT_PATH=http://localhost:9000/rust-docs-rs
-export DOCSRS_INCLUDE_DEFAULT_TARGETS=false
-export DOCSRS_DOCKER_IMAGE=ghcr.io/rust-lang/crates-build-env/linux-micro
-export SENTRY_ENVIRONMENT=dev
+DOCSRS_TOOLCHAIN=nightly
+
+# NOTE: when running services in docker-compose, you can override the settings in
+# `.docker.env`, you'll find an example in `.docker.env.sample`.
diff --git a/.git_hooks/pre-commit b/.git_hooks/pre-commit
deleted file mode 100755
index 0c7b0eb40..000000000
--- a/.git_hooks/pre-commit
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env sh
-if ! cargo fmt -- --check ; then
-    printf "\n"
-    printf "\033[0;31mpre-commit hook failed during:\033[0m\n"
-    printf "\033[0;31m\tcargo fmt -- --check\033[0m\n"
-    exit 1
-fi
-
-if ! cargo clippy --locked -- -D warnings ; then
-    printf "\n"
-    printf "\033[0;31mpre-commit hook failed during:\033[0m\n"
-    printf "\033[0;31m\tclippy --locked -- -D warning\033[0m\n"
-    exit 1
-fi
-
-printf "\n"
-printf "\033[0;32mpre-commit hook succeeded\033[0m\n"
diff --git a/.git_hooks/pre-push b/.git_hooks/pre-push
deleted file mode 100755
index a8646e6b0..000000000
--- a/.git_hooks/pre-push
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env sh
-fd_hard_limit="$(ulimit -n -H)"
-if [ "$fd_hard_limit" = "unlimited" ] || [ "$fd_hard_limit" -ge 4096 ] ; then
-    ulimit -n 4096
-else
-    ulimit -n "$fd_hard_limit"
-fi
-
-if !
cargo test ; then - printf "\n" - printf "\033[0;31mpre-push hook failed during:\033[0m\n" - printf "\033[0;31m\tcargo test\033[0m\n" - exit 1 -fi - -printf "\n" -printf "\033[0;32mpre-push hook succeeded\033[0m\n" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 70d3a8e7c..9a214404c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,17 +9,7 @@ on: - cron: "0 0 * * *" env: - RUST_BACKTRACE: 1 RUST_CACHE_KEY: rust-cache-20241114 - DOCSRS_PREFIX: ignored/cratesfyi-prefix - DOCSRS_DATABASE_URL: postgresql://cratesfyi:password@localhost:15432 - DOCSRS_LOG: docs_rs=debug,rustwide=info - AWS_ACCESS_KEY_ID: cratesfyi - AWS_SECRET_ACCESS_KEY: secret_key - S3_ENDPOINT: http://localhost:9000 - DOCSRS_INCLUDE_DEFAULT_TARGETS: false - DOCSRS_DOCKER_IMAGE: ghcr.io/rust-lang/crates-build-env/linux-micro - SENTRY_ENVIRONMENT: dev jobs: sqlx: @@ -27,112 +17,80 @@ jobs: steps: - uses: actions/checkout@v5 - # We check that all github actions workflows have valid syntax - - name: Validate YAML file - uses: raven-actions/actionlint@v2 - with: - files: .github/workflow/* - flags: "-ignore SC2086" # ignore some shellcheck errors - - name: install `just` run: sudo snap install --edge --classic just + - uses: cargo-bins/cargo-binstall@main + - name: restore build & cargo cache uses: Swatinem/rust-cache@v2 with: prefix-key: ${{ env.RUST_CACHE_KEY }} - - name: Launch postgres - run: | - touch .docker.env - docker compose up --wait --wait-timeout 30 db + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - name: install SQLX CLI - run: cargo install sqlx-cli --no-default-features --features postgres - - - name: run database migrations - run: cargo sqlx migrate run --database-url $DOCSRS_DATABASE_URL + run: cargo binstall sqlx-cli - - name: run sqlx prepare --check - run: just sqlx-prepare - - - name: test reverse migrations + - name: run sqlx migration up & down run: | - # --target 0 means "revert everything" - cargo sqlx migrate revert \ - --database-url $DOCSRS_DATABASE_URL \ - --target-version 0 + just sqlx-migrate-run \ + sqlx-check \ + sqlx-migrate-revert + + - name: shut down test environment + if: ${{ always() }} + run: just compose-down-and-wipe test: - env: - SQLX_OFFLINE: 1 runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 + - name: install `just` + run: sudo snap install --edge --classic just + - name: restore build & cargo cache uses: Swatinem/rust-cache@v2 with: prefix-key: ${{ env.RUST_CACHE_KEY }} - - name: Build - run: cargo build --workspace --locked - - - name: Launch postgres and min.io - run: | - touch .docker.env - docker compose up --wait --wait-timeout 30 db s3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - - name: run workspace tests - run: | - cargo test --workspace --locked --no-fail-fast + - name: run tests + run: just run-tests run-builder-tests - - name: run slow tests - env: - DOCSRS_INCLUDE_DEFAULT_TARGETS: true - run: | - cargo test --locked -- --ignored --test-threads=1 + - name: shut down test environment + if: ${{ always() }} + run: just compose-down-and-wipe GUI_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - name: restore build & cargo cache - uses: Swatinem/rust-cache@v2 - with: - prefix-key: ${{ env.RUST_CACHE_KEY }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - - name: Launch postgres and min.io - run: | - touch .docker.env - docker compose up --wait --wait-timeout 30 db s3 + - name: install `just` + run: sudo snap install --edge --classic 
just - name: Run GUI tests - run: ./dockerfiles/run-gui-tests.sh - - - fmt: - name: Rustfmt - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v5 - - name: update rust toolchain - run: rustup component add rustfmt + run: just run-gui-tests - - run: cargo fmt -- --check + - name: shut down test environment + if: ${{ always() }} + run: just compose-down-and-wipe - clippy: - name: Clippy + lint-rs: + name: rust linters runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - name: update rust toolchain - run: rustup component add clippy - - name: install `just` run: sudo snap install --edge --classic just @@ -141,13 +99,33 @@ jobs: with: prefix-key: ${{ env.RUST_CACHE_KEY }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - run: just lint - eslint: + lint-js: + name: js linters runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 + - uses: denoland/setup-deno@v2 + - name: install `just` run: sudo snap install --edge --classic just + - run: just lint-js + + lint-actions: + name: gh actions linters + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + + # We check that all github actions workflows have valid syntax + - name: Validate YAML file + uses: raven-actions/actionlint@v2 + with: + files: .github/workflow/* + flags: "-ignore SC2086" # ignore some shellcheck errors diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index e81ed1f0b..0c830c25f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,15 +1,47 @@ name: Docker -on: [push, pull_request] +on: + push: + branches: + - master + pull_request: + schedule: + - cron: "0 0 * * *" jobs: docker: + strategy: + matrix: + target: [ + "web-server", + "build-server", + "registry-watcher", + "cli" + ] name: Test docker image builds runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - run: docker build --target web-server -f dockerfiles/Dockerfile . - - run: docker build --target build-server -f dockerfiles/Dockerfile . - - run: docker build --target registry-watcher -f dockerfiles/Dockerfile . - - run: docker build --target cli -f dockerfiles/Dockerfile . + - name: setup docker buildx + uses: docker/setup-buildx-action@v3 + + - name: build docker image + uses: docker/build-push-action@v6 + with: + context: . + file: "./dockerfiles/Dockerfile" + platforms: linux/amd64 + target: ${{ matrix.target }} + build-args: | + GIT_SHA=${{ github.sha }} + PROFILE=release + PROFILE_DIR=release + load: true + cache-from: type=gha + cache-to: type=gha,mode=max + push: false + + # TODO: later we would set `push: true` and also provide nice tags + # for the images. + # Unclear is how the deploy would work then. 
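For local debugging of this workflow, a single matrix entry can be approximated with a plain `docker buildx build` invocation; this is only a sketch (the image tag is arbitrary, and the GitHub Actions cache options are left out because they only work inside Actions):

```sh
# Build one target from dockerfiles/Dockerfile, roughly mirroring a CI matrix entry.
# GIT_SHA defaults to "dev" inside the Dockerfile when the build-arg is not passed.
docker buildx build \
    --file dockerfiles/Dockerfile \
    --target web-server \
    --platform linux/amd64 \
    --build-arg GIT_SHA="$(git rev-parse HEAD)" \
    --build-arg PROFILE=release \
    --build-arg PROFILE_DIR=release \
    --load \
    --tag docs-rs-web:local \
    .
```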
diff --git a/.gitignore b/.gitignore index 9d9ef06ba..3ab5f4610 100644 --- a/.gitignore +++ b/.gitignore @@ -11,5 +11,5 @@ target .vagrant .rustwide .rustwide-docker -.archive_cache +archive_cache .workspace diff --git a/Justfile b/Justfile index 17bb484d6..732b133e4 100644 --- a/Justfile +++ b/Justfile @@ -1,48 +1,26 @@ -# List available commands -_default: - just --list - -sqlx-prepare ADDITIONAL_ARGS="": - cargo sqlx prepare \ - --database-url $DOCSRS_DATABASE_URL \ - --workspace {{ ADDITIONAL_ARGS }} \ - -- --all-targets --all-features - -sqlx-check: - just sqlx-prepare "--check" - -lint *args: - cargo clippy --all-features --all-targets --workspace --locked {{ args }} -- -D warnings +set shell := ["bash", "-Eeuo", "pipefail", "-c"] +set ignore-comments +set dotenv-load := true +set dotenv-override := true -lint-fix: - just lint --fix --allow-dirty --allow-staged +# minimal settings to run justfile recipes that don't just run docker, like `run-tests`. +# Typically you will want to create your own `.env` file based on `.env.sample` for +# easier local development. +export DOCSRS_PREFIX := env("DOCSRS_PREFIX", "ignored/cratesfyi-prefix") +export DOCSRS_DATABASE_URL := env("DOCSRS_DATABASE_URL", "postgresql://cratesfyi:password@localhost:15432") +export AWS_ACCESS_KEY_ID := env("AWS_ACCESS_KEY_ID", "cratesfyi") +export AWS_SECRET_ACCESS_KEY := env("AWS_SECRET_ACCESS_KEY", "secret_key") +export S3_ENDPOINT := env("S3_ENDPOINT", "http://localhost:9000") -lint-js *args: - deno run -A npm:eslint@9 static templates gui-tests eslint.config.js {{ args }} -# Initialize the docker compose database -[group('compose')] -compose-migrate: - docker compose run --build --rm cli database migrate - -# Update last seen reference to the current index head, to only build newly published crates -[group('compose')] -compose-queue-head: - docker compose run --build --rm cli queue set-last-seen-reference --head - -# Launch base docker services, ensuring the database is migrated -[group('compose')] -compose-up: - just compose-migrate - docker compose up --build -d +# List available commands +_default: + @just --list -# Launch base docker services and registry watcher, ensuring the database is migrated -[group('compose')] -compose-up-watch: - just compose-migrate - docker compose --profile watch up --build -d +import 'justfiles/cli.just' +import 'justfiles/utils.just' +import 'justfiles/services.just' +import 'justfiles/testing.just' -# Shutdown docker services and cleanup all temporary volumes -[group('compose')] -compose-down: - docker compose --profile all down --volumes --remove-orphans +psql: + psql $DOCSRS_DATABASE_URL diff --git a/README.md b/README.md index 881d5eabe..dc5c8f114 100644 --- a/README.md +++ b/README.md @@ -28,15 +28,6 @@ The recommended way to develop docs.rs is a combination of `cargo run` for the main binary and [docker-compose](https://docs.docker.com/compose/) for the external services. This gives you reasonable incremental build times without having to add new users and packages to your host machine. -### Git Hooks - -For ease of use, `git_hooks` directory contains useful `git hooks` to make your development easier. - -```bash -# Unix -cd .git/hooks && ln -s ../../.git_hooks/* . && cd ../.. -# Powershell -cd .git/hooks && New-Item -Path ../../.git_hooks/* -ItemType SymbolicLink -Value . && cd ../.. 
``` ### Dependencies @@ -108,7 +99,7 @@ cargo test To run GUI tests: ``` -./dockerfiles/run-gui-tests.sh +just run-gui-tests ``` They use the [browser-ui-test](https://github.com/GuillaumeGomez/browser-UI-test/) framework. You @@ -128,53 +119,57 @@ npm install browser-ui-test ### Pure docker-compose -If you have trouble with the above commands, consider using `docker compose up --build`, +If you have trouble with the above commands, consider using `just compose-up-web`, which uses docker-compose for the web server as well. This will not cache dependencies - in particular, you'll have to rebuild all 400 whenever the lockfile changes - but makes sure that you're in a known environment so you should have fewer problems getting started. -You'll need to `touch .docker.env` first, this file can have any environment -variable overrides you want to use in docker containers. Then run the migrations -before launching the main services: +You can put environment overrides for the docker containers into `.docker.env`, +first. The migrations will be run by our just recipes when needed. ```sh -docker compose run --build --rm cli database migrate -docker compose up --build -d +just cli-db-migrate +just compose-up-web ``` -You can also use the `builder-a` container to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): +You can also use the `builder` compose profile to run builds on systems which don't support running builds directly (mostly on Mac OS or Windows): ```sh +just compose-up-builder + +# and if needed + # update the toolchain -docker compose run --rm builder-a build update-toolchain +just cli-build-update-toolchain + # run a build for a single crate -docker compose run --rm builder-a build crate regex 1.3.1 -# rebuild containers when you changed code. -docker compose up --wait --build +just cli-build-crate regex 1.3.1 ``` You can also run other non-build commands like the setup steps above, or queueing crates for the background builders from within the `cli` container: ```sh -docker compose run --rm cli database migrate -docker compose run --rm cli queue add regex 1.3.1 +just cli-db-migrate +just cli-queue-add regex 1.3.1 ``` -If you want to run the registry watcher, you'll need to first set the "last seen -reference" from the registry index, e.g. to set it to the current head so only -newly published crates are built: - +If you want to run the registry watcher, you can use the `watcher` profile: ```sh -docker compose run --rm cli queue set-last-seen-reference --head +just compose-up-watcher ``` -Then enable the docker-compose profile that includes the watcher: +It it was never run, we will start watching for registry changes at the current HEAD of the index. + +If you want to start from another point: ```sh -docker compose --profile watch up --build -d +just cli-queue-reset-last-seen-ref GIT_REF ``` -Note that running tests is not supported when using pure docker-compose. +Note that running tests is currently not supported when using pure docker-compose. + +Some of the above commands are included in the `Justfile` for ease of use, +check `just --list` for an overview. Some of the above commands are included in the `Justfile` for ease of use, check the `[compose]` group in `just --list`. @@ -184,26 +179,23 @@ Please file bugs for any trouble you have running docs.rs! ### Docker-Compose The services started by Docker-Compose are defined in [docker-compose.yml]. 
-Three services are defined: - -| name | access | credentials | description | -|------|-------------------------------------------------|----------------------------|----------------------------------------| -| web | http://0.0.0.0:3000 | N/A | A container running the docs.rs binary | -| db | postgresql://cratesfyi:password@127.0.0.1:15432 | - | Postgres database used by web | -| s3 | http://127.0.0.1:9000 | `cratesfyi` - `secret_key` | MinIO (simulates AWS S3) used by web | +For convenience, there are plenty of `just` recipes built around it. [docker-compose.yml]: ./docker-compose.yml #### Rebuilding Containers -To rebuild the site, run `docker compose --profile all build`. -Note that docker-compose caches the build even if you change the source code, -so this will be necessary anytime you make changes. +The `just` recipes for compose handle rebuilds themselves, so nothing needs to +be done here. If you want to completely clean up the database, don't forget to remove the volumes too: ```sh -$ docker compose down --volumes +# just shut down containers normally +$ just compose-down + +# shut down and clear all volumes. +$ just compose-down-and-wipe ``` #### FAQ diff --git a/build.rs b/build.rs index ae2f06821..7c7548e9c 100644 --- a/build.rs +++ b/build.rs @@ -71,7 +71,7 @@ mod tracked { fn main() -> Result<()> { let out_dir = env::var("OUT_DIR").context("missing OUT_DIR")?; let out_dir = Path::new(&out_dir); - write_git_version(out_dir)?; + read_git_version()?; compile_sass(out_dir)?; write_known_targets(out_dir)?; compile_syntax(out_dir).context("could not compile syntax files")?; @@ -81,16 +81,21 @@ fn main() -> Result<()> { Ok(()) } -fn write_git_version(out_dir: &Path) -> Result<()> { - let maybe_hash = get_git_hash()?; - let git_hash = maybe_hash.as_deref().unwrap_or("???????"); - - let build_date = time::OffsetDateTime::now_utc().date(); +fn read_git_version() -> Result<()> { + if let Ok(v) = env::var("GIT_SHA") { + // first try to read an externally provided git SAH, e.g., from CI + println!("cargo:rustc-env=GIT_SHA={v}"); + } else { + // then try to read the git repo. + let maybe_hash = get_git_hash()?; + let git_hash = maybe_hash.as_deref().unwrap_or("???????"); + println!("cargo:rustc-env=GIT_SHA={git_hash}"); + } - std::fs::write( - out_dir.join("git_version"), - format!("({git_hash} {build_date})"), - )?; + println!( + "cargo:rustc-env=BUILD_DATE={}", + time::OffsetDateTime::now_utc().date(), + ); Ok(()) } diff --git a/docker-compose.yml b/docker-compose.yml index 935f58048..b9b1c1261 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,3 +1,64 @@ +# main docker-compose file for local development. +# +# runs & configures by default: +# * `db` -> postgres db +# * `s3` -> minio +# +# optional profile: `web`: +# * `web` -> webserver +# +# optional profile: `builder`: +# * `builder-a` -> build-server 1 +# * `builder-b` -> build-server 2 +# ( two parallel build-servers, sharing nothing apart +# from the build queue they access) +# +# optional profile: `watcher`: +# * `registry-watcher` -> +# - crates.io registry watcher +# - repo-stats updater +# - cdn invalidator +# - release-rebuild-enqueuer +# +# optional profile: `metrics`: +# * `prometheus` -> configured prometheus instance +# +# optional profile: `full`: all of the above. +# +# Services purely for manual usage with `docker compose run` are: +# * `cli`: to run simple CLI commands that only need the database & S3 +# * `builder-cli`: to run CLI commands that need the build environment. 
+# * `registry-watcher-cli`: to run CLI commands that need the crates.io index. +# +# CAVEATS: +# * the build-servers have to run on the `linux/amd64` platform, +# while it doesn't matter for the rest of the services. +# This means for example on a Mac, the layers will be +# cached separately, once for `linux/amd64` and once for +# `linux/arm64`, which increases build-time. +# +# * volumes: typically docker-native volumes are faster than mounts, but +# sometimes annoying to inspect for debugging. +# For now we choose: +# * docker-native for DB, S3, rustwide workspace, crates.io index +# * mounts for prefix +# +# * prometheus scrape config is set to collect from the web server, the +# registry watcher, and the build servers. Scraping is not dynamic, so +# the local prometheus server will try to fetch from all service +# instances (web, watcher, builder), and just error in case the specific +# server isn't accessible. + +x-docker-cache: &docker-cache + # shared configuration to cache docker layers across CI runs. + # can just always be in in the `docker-compose.yml`, and will + # just do nothing if we're not on GHA. + # So feel free to add more techniques if needed. + cache_from: + - type=gha + cache_to: + - type=gha,mode=max + x-healthcheck: &healthcheck-interval interval: 10s timeout: 1s @@ -5,33 +66,37 @@ x-healthcheck: &healthcheck-interval start_period: 10s x-environment: &environment - RUST_BACKTRACE: true + # needed default environment that can be overwritten in `.docker.env` + # Example in `.docker.env.sample` + RUST_BACKTRACE: ${RUST_BACKTRACE:-true} + DOCSRS_LOG: ${DOCSRS_LOG:-docs_rs=trace,tower_http=debug,rustwide=info,aws_smithy_http=trace,axum=debug} + # fixed environment to connect the different services in this file properly DOCSRS_PREFIX: /opt/docsrs/prefix - DOCSRS_COMPILER_METRICS_PATH: /opt/docsrs/prefix/metrics - DOCSRS_DATABASE_URL: postgresql://cratesfyi:password@db DOCSRS_MIN_POOL_IDLE: 1 DOCSRS_MIN_POOL_SIZE: 2 DOCSRS_MAX_POOL_SIZE: 10 - DOCSRS_MAX_LEGACY_POOL_SIZE: 10 - DOCSRS_STORAGE_BACKEND: s3 - S3_ENDPOINT: http://s3:9000 AWS_ACCESS_KEY_ID: cratesfyi AWS_SECRET_ACCESS_KEY: secret_key - DOCSRS_RENDER_THREADS: 2 +x-builder-environment: &builder-environment + <<: *environment + # default builder environment that can be overwritten in `.docker.env` + DOCSRS_INCLUDE_DEFAULT_TARGETS: ${DOCSRS_INCLUDE_DEFAULT_TARGETS:-false} + DOCSRS_DOCKER_IMAGE: ${DOCSRS_DOCKER_IMAGE:-ghcr.io/rust-lang/crates-build-env/linux-micro} + # fixed environment to connect the different services in this file properly DOCSRS_RUSTWIDE_WORKSPACE: /opt/docsrs/rustwide + DOCSRS_COMPILER_METRICS_PATH: /opt/docsrs/prefix/metrics DOCSRS_DOCKER: true - DOCSRS_DOCKER_IMAGE: ghcr.io/rust-lang/crates-build-env/linux-micro DOCSRS_BUILD_CPU_LIMIT: 2 - DOCSRS_INCLUDE_DEFAULT_TARGETS: false x-build: &build context: . + <<: *docker-cache dockerfile: ./dockerfiles/Dockerfile args: PROFILE: dev @@ -41,11 +106,41 @@ x-builder: &builder build: <<: *build target: build-server + # build servers only work with linux/amd64 for now. + # This makes them much slower when working on a mac, but + # it needs some work and digging into the builder to fix + # this. 
platform: "linux/amd64" depends_on: - db - s3 - environment: *environment + environment: + <<: *builder-environment + env_file: + - .docker.env + healthcheck: + <<: *healthcheck-interval + test: curl --silent --fail localhost:3000/about/metrics + +x-registry-watcher: ®istry-watcher + build: + <<: *build + target: registry-watcher + depends_on: + - db + - s3 + volumes: + - "./ignored/docker-registry-watcher/prefix:/opt/docsrs/prefix" + - crates-io-index:/opt/docsrs/crates.io-index + environment: + <<: *environment + # the crates.io index can be shared between the registry watcher & its + # CLI service. + # So we configure it to be separate from the prefix. + # Also on a native docker volume for performance + REGISTRY_INDEX_PATH: /opt/docsrs/crates.io-index + # configure the rebuild-queuer, + DOCSRS_MAX_QUEUED_REBUILDS: ${DOCSRS_MAX_QUEUED_REBUILDS:-10} env_file: - .docker.env healthcheck: @@ -57,82 +152,104 @@ services: build: <<: *build target: web-server - platform: "linux/amd64" depends_on: - db - s3 ports: - - "3000:80" - environment: *environment + - "3000:3000" + environment: + <<: *environment + DOCSRS_RENDER_THREADS: 2 + DOCSRS_S3_STATIC_ROOT_PATH: http://localhost:9000/rust-docs-rs env_file: - .docker.env healthcheck: <<: *healthcheck-interval - test: curl --silent --fail localhost:80/about/metrics + test: curl --silent --fail localhost:3000/about/metrics + profiles: + - web + - full - # Include the registry watcher with - # `docker compose --profile watch up --build --wait` registry-watcher: - build: - <<: *build - target: registry-watcher - platform: "linux/amd64" - depends_on: - - db - volumes: - - "cratesio-index:/opt/docsrs/prefix/crates.io-index" - - "./static:/opt/docsrs/static:ro" - environment: *environment - env_file: - - .docker.env + <<: *registry-watcher profiles: - - watch - - all - healthcheck: - <<: *healthcheck-interval - test: curl --silent --fail localhost:3000/about/metrics + - watcher + - full + + registry-watcher-cli: + <<: *registry-watcher + profiles: + # watcher-CLI should not be run as background daemon, just manually + - manual builder-a: <<: *builder volumes: - - ".rustwide-docker/builder-a:/opt/docsrs/rustwide" - - "./ignored/cratesfyi-prefix/metrics:/opt/docsrs/prefix/metrics" + - "rustwide-builder-a:/opt/docsrs/rustwide" + - "./ignored/docker-builder-a/prefix:/opt/docsrs/prefix" + # this exposes the docker engine from the host machine + # to the build-server inside the container. - "/var/run/docker.sock:/var/run/docker.sock" + profiles: + - builder + - full builder-b: <<: *builder volumes: - - ".rustwide-docker/builder-b:/opt/docsrs/rustwide" - - "./ignored/cratesfyi-prefix/metrics:/opt/docsrs/prefix/metrics" + - "rustwide-builder-b:/opt/docsrs/rustwide" + - "./ignored/docker-builder-b/prefix:/opt/docsrs/prefix" + # this exposes the docker engine from the host machine + # to the build-server inside the container. + - "/var/run/docker.sock:/var/run/docker.sock" + profiles: + - builder + - full + + builder-cli: + <<: *builder + volumes: + - "rustwide-builder-cli:/opt/docsrs/rustwide" + - "./ignored/docker-builder-cli/prefix:/opt/docsrs/prefix" + # this exposes the docker engine from the host machine + # to the build-server inside the container. - "/var/run/docker.sock:/var/run/docker.sock" + profiles: + # builder-CLI should not be run as background daemon, just manually + - manual cli: build: <<: *build target: cli - platform: "linux/amd64" depends_on: + # only for clarification. 
+ # When using "docker compose run", these dependencies are ignored, + # we handle this in our `just` commands. - db - s3 - volumes: - - "cratesio-index:/opt/docsrs/prefix/crates.io-index" environment: *environment + volumes: + - "./ignored/docker-cli/prefix:/opt/docsrs/prefix" env_file: - .docker.env profiles: - - all + # CLI should not be run as background daemon, just manually + - manual db: build: context: ./dockerfiles dockerfile: ./Dockerfile-postgres + <<: *docker-cache volumes: - postgres-data:/var/lib/postgresql/data environment: POSTGRES_USER: cratesfyi POSTGRES_PASSWORD: password ports: - # Use a non-standard port on the host to avoid conflicting with existing postgres servers + # Use a non-standard port on the host to avoid conflicting + # with existing postgres servers - "127.0.0.1:15432:5432" healthcheck: <<: *healthcheck-interval @@ -161,25 +278,44 @@ services: build: context: ./dockerfiles dockerfile: ./Dockerfile-prometheus + <<: *docker-cache ports: - "127.0.0.1:9090:9090" + # we intentionally don't define depends_on here. + # While the scrapers are configured to fetch from eventually running + # web or build-servers, adding these as dependency would mean we can't + # test metrics just with a webserver. + # Prometheus will just scrape from the working endpoints, and skip/error + # on the broken ones. healthcheck: <<: *healthcheck-interval test: promtool check healthy + profiles: + # we rarely need to test with actual prometheus, so always running + # it is a waste. + - metrics + - full gui_tests: + platform: "linux/amd64" build: context: . dockerfile: ./dockerfiles/Dockerfile-gui-tests + <<: *docker-cache network_mode: "host" extra_hosts: - "host.docker.internal:host-gateway" volumes: - "${PWD}:/build/out" profiles: - - all + # gui_tests should not be run as background daemon. + # Run via `just run-gui-tests`. + - manual volumes: postgres-data: {} minio-data: {} - cratesio-index: {} + crates-io-index: {} + rustwide-builder-a: {} + rustwide-builder-b: {} + rustwide-builder-cli: {} diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index ecf272747..42a820c47 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -1,3 +1,5 @@ +# syntax=docker/dockerfile:1 + # To produce a smaller image this Dockerfile contains two separate stages: in # the first one all the build dependencies are installed and docs.rs is built, # while in the second one just the runtime dependencies are installed, with the @@ -5,23 +7,39 @@ # # As of 2019-10-29 this reduces the image from 2.8GB to 500 MB. 
+ ################# # Build stage # ################# -FROM ubuntu:24.04 AS build +FROM rust:1.91-slim-trixie AS build + +ENV DEBIAN_FRONTEND=noninteractive # Install packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - build-essential git curl cmake gcc g++ pkg-config libmagic-dev \ - libssl-dev zlib1g-dev ca-certificates mold clang - -# Install the stable toolchain with rustup -RUN curl https://sh.rustup.rs >/tmp/rustup-init && \ - chmod +x /tmp/rustup-init && \ - /tmp/rustup-init -y --no-modify-path --default-toolchain stable --profile minimal +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + git \ + curl \ + cmake \ + gcc \ + g++ \ + pkg-config \ + libmagic-dev \ + libssl-dev \ + zlib1g-dev \ + ca-certificates \ + mold \ + clang + ENV PATH=/root/.cargo/bin:$PATH +# get the git SHA from the build args, for our generated version numbers +ARG GIT_SHA=dev +ENV GIT_SHA=$GIT_SHA + # Configure linking to use mold instead for speed (need to use clang because gcc # is too old on this image) ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=clang @@ -34,23 +52,21 @@ ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=-Clink-arg=-fuse-ld=mold WORKDIR /build COPY benches benches COPY Cargo.lock Cargo.toml ./ -COPY crates/metadata crates/metadata/ -COPY crates/font-awesome-as-a-crate crates/font-awesome-as-a-crate +COPY crates crates RUN mkdir -p src/bin && \ echo "fn main() {}" > src/bin/cratesfyi.rs && \ echo "fn main() {}" > build.rs -RUN cargo fetch - ARG PROFILE=release -RUN cargo build --profile=$PROFILE +RUN --mount=type=cache,target=/usr/local/cargo/registry,id=cargo-registry,sharing=locked \ + --mount=type=cache,target=/build/target,id=cargo-target,sharing=locked \ + cargo build --profile=$PROFILE # Dependencies are now cached, copy the actual source code and do another full # build. The touch on all the .rs files is needed, otherwise cargo assumes the # source code didn't change thanks to mtime weirdness. 
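The dependency-caching dance above (skeleton `main.rs` plus `--mount=type=cache`) relies on BuildKit, which the `# syntax=docker/dockerfile:1` header opts into. A rough sketch of a one-off image build outside of compose; the target and build-arg names come from this Dockerfile, while the tag and the use of the full git SHA are assumptions:

```sh
# BuildKit is the default in recent Docker releases; the env var forces it on older ones
DOCKER_BUILDKIT=1 docker build \
    --file dockerfiles/Dockerfile \
    --target web-server \
    --build-arg GIT_SHA="$(git rev-parse HEAD)" \
    --build-arg PROFILE=release \
    --tag docs-rs-web:dev \
    .
```

`GIT_SHA` defaults to `dev` and `PROFILE` to `release`, so both arguments can be dropped; the other stages (`build-server`, `registry-watcher`, `cli`) build the same way with a different `--target`. Within the compose setup the images are normally built via `docker compose ... --build`, so a manual invocation like this is mainly useful for debugging the Dockerfile itself.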
RUN rm -rf src build.rs -COPY .git .git COPY build.rs build.rs RUN touch build.rs COPY src src/ @@ -61,96 +77,124 @@ COPY assets assets/ COPY .sqlx .sqlx/ COPY migrations migrations/ -RUN cargo build --profile=$PROFILE +ARG PROFILE_DIR=release +RUN --mount=type=cache,target=/usr/local/cargo/registry,id=cargo-registry,sharing=locked \ + --mount=type=cache,target=/build/target,id=cargo-target,sharing=locked \ + mkdir /artifacts && \ + cargo build --profile=$PROFILE && \ + cp /build/target/$PROFILE_DIR/cratesfyi /artifacts/ ###################### # Web server stage # ###################### -FROM ubuntu:24.04 AS web-server +FROM debian:trixie-slim AS web-server + +ENV DEBIAN_FRONTEND=noninteractive +# hadolint ignore=DL3008 RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + && apt-get install -y \ + --no-install-recommends \ ca-certificates \ curl \ tini \ + && apt-get clean \ && rm -rf /var/lib/apt/lists/* WORKDIR /srv/docsrs -# Tini is a small init binary to properly handle signals ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] -CMD ["start-web-server", "0.0.0.0:80"] +CMD ["start-web-server", "0.0.0.0:3000"] -ARG PROFILE_DIR=release -COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin +COPY --from=build /artifacts/cratesfyi /usr/local/bin COPY static /srv/docsrs/static -COPY templates /srv/docsrs/templates COPY vendor /srv/docsrs/vendor ######################## # Build server stage # ######################## +# * includes docker-cli, but expects the docker engine to be mapped into +# the container from the outside, e.g. via +# +# volumes: +# - "/var/run/docker.sock:/var/run/docker.sock" -FROM ubuntu:22.04 AS build-server +FROM debian:trixie-slim AS build-server +ENV DEBIAN_FRONTEND=noninteractive + +# hadolint ignore=DL3008 RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + && apt-get install -y \ + --no-install-recommends \ ca-certificates \ tini \ curl \ - docker.io \ + docker-cli \ build-essential \ gcc \ pkg-config \ libssl-dev \ + && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Tini is a small init binary to properly handle signals ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] -CMD ["start-build-server"] +CMD ["start-build-server", "0.0.0.0:3000"] + +COPY --from=build /artifacts/cratesfyi /usr/local/bin -ARG PROFILE_DIR=release -COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin ############################ # Registry watcher stage # ############################ -FROM ubuntu:22.04 AS registry-watcher +FROM debian:trixie-slim AS registry-watcher +ENV DEBIAN_FRONTEND=noninteractive + +# hadolint ignore=DL3008 RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + && apt-get install -y \ + --no-install-recommends \ ca-certificates \ tini \ curl \ git \ + && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Tini is a small init binary to properly handle signals ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] -CMD ["start-registry-watcher", "--repository-stats-updater=enabled", "--cdn-invalidator=enabled"] +CMD ["start-registry-watcher", "0.0.0.0:3000", "--repository-stats-updater=enabled", "--cdn-invalidator=enabled", "--queue-rebuilds=enabled"] -ARG PROFILE_DIR=release -COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin +COPY 
--from=build /artifacts/cratesfyi /usr/local/bin ############### # CLI stage # ############### +# This stage is used to run one-off commands like database migrations. +# not suited for commands that need: +# * the crates.io index, or +# * need to run builds. +# for these, use the build-server stage, or registry-watcher stage instead. -FROM ubuntu:22.04 AS cli +FROM debian:trixie-slim AS cli +ENV DEBIAN_FRONTEND=noninteractive + +# hadolint ignore=DL3008 RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + && apt-get install -y \ + --no-install-recommends \ ca-certificates \ tini \ + && apt-get clean \ && rm -rf /var/lib/apt/lists/* +WORKDIR /srv/docsrs +# copy migrations so we can run them via CLI script +COPY migrations migrations/ + ENTRYPOINT ["/usr/bin/tini", "/usr/local/bin/cratesfyi", "--"] -ARG PROFILE_DIR=release -COPY --from=build /build/target/$PROFILE_DIR/cratesfyi /usr/local/bin +COPY --from=build /artifacts/cratesfyi /usr/local/bin diff --git a/dockerfiles/Dockerfile-gui-tests b/dockerfiles/Dockerfile-gui-tests index c6a0aa87f..7ce888b4a 100644 --- a/dockerfiles/Dockerfile-gui-tests +++ b/dockerfiles/Dockerfile-gui-tests @@ -1,23 +1,29 @@ -FROM ubuntu:24.04 AS build +FROM node:22-trixie-slim -# Install packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - build-essential git curl cmake gcc g++ pkg-config libmagic-dev \ - libssl-dev zlib1g-dev ca-certificates +ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - docker.io \ - gcc \ - git \ - libssl-dev \ - pkg-config \ - xz-utils +# Install packaged dependencies +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + git \ + curl \ + cmake \ + gcc \ + g++ \ + pkg-config \ + libmagic-dev \ + libssl-dev \ + zlib1g-dev \ + ca-certificates \ + docker.io \ + xz-utils # Install dependencies for chromium browser -RUN apt-get install -y \ - libasound2t64 \ +# hadolint ignore=DL3008 +RUN apt-get install -y --no-install-recommends \ + libasound2 \ libatk1.0-0 \ libatk-bridge2.0-0 \ libc6 \ @@ -28,7 +34,6 @@ RUN apt-get install -y \ libfontconfig1 \ libgbm-dev \ libgcc1 \ - libgdk-pixbuf2.0-0 \ libglib2.0-0 \ libgtk-3-0 \ libnspr4 \ @@ -54,9 +59,8 @@ RUN apt-get install -y \ xdg-utils \ wget -RUN curl -sL https://nodejs.org/dist/v22.13.1/node-v22.13.1-linux-x64.tar.xz | tar -xJ -ENV PATH="/node-v22.13.1-linux-x64/bin:${PATH}" -ENV NODE_PATH="/node-v22.13.1-linux-x64/lib/node_modules/" +RUN apt-get clean \ + && rm -rf /var/lib/apt/lists/* WORKDIR /build diff --git a/dockerfiles/prometheus.yml b/dockerfiles/prometheus.yml index 08a069372..082320e44 100644 --- a/dockerfiles/prometheus.yml +++ b/dockerfiles/prometheus.yml @@ -11,4 +11,4 @@ scrape_configs: - job_name: "docs.rs" metrics_path: "/about/metrics" static_configs: - - targets: ["web:3000"] + - targets: ["web:3000", "registry-watcher:3000", "builder-a:3000", "builder-b:3000"] diff --git a/dockerfiles/run-gui-tests.sh b/dockerfiles/run-gui-tests.sh deleted file mode 100755 index 1f4420739..000000000 --- a/dockerfiles/run-gui-tests.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# Just in case it's running, we stop the web server. 
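The widened Prometheus scrape list assumes each docs.rs service now serves metrics on port 3000 inside the compose network (matching the `CMD` lines in the Dockerfile stages). A rough way to sanity-check this locally, assuming the `metrics` and `web` profiles and the host port mappings defined earlier:

```sh
# start prometheus together with the web server
docker compose --profile metrics --profile web up --build -d --wait

# the endpoint prometheus scrapes as web:3000 inside the network
curl --silent --fail http://localhost:3000/about/metrics | head

# prometheus itself is published on 127.0.0.1:9090; /-/ready is its standard readiness endpoint
curl --silent --fail http://127.0.0.1:9090/-/ready
```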
-docker compose stop web
-
-docker compose up --wait --wait-timeout 30 db s3
-
-# If we have a .env file, we need to temporarily move it so
-# it doesn't make sqlx fail compilation.
-if [ -f .env ]; then
-    mv .env .tmp.env
-fi
-
-# We add the information we need.
-cargo run -- database migrate
-cargo run -- build update-toolchain
-cargo run -- build crate sysinfo 0.23.4
-cargo run -- build crate sysinfo 0.23.5
-cargo run -- build crate libtest 0.0.1
-cargo run -- build add-essential-files
-
-if [ -f .tmp.env ]; then
-    mv .tmp.env .env
-fi
-
-# In case we don't have a `.env`, we create one.
-if [ ! -f .env ]; then
-    cp .env.sample .env
-fi
-
-. .env
-
-set +e # We disable the "exit right away if command failed" setting.
-cargo run -- start-web-server &
-SERVER_PID=$!
-
-# status="docker run . -v `pwd`:/build/out:ro gui_tests"
-docker compose build gui_tests
-docker compose run --rm --remove-orphans gui_tests
-status=$?
-kill $SERVER_PID
-exit $status
diff --git a/justfiles/cli.just b/justfiles/cli.just
new file mode 100644
index 000000000..a32b3692c
--- /dev/null
+++ b/justfiles/cli.just
@@ -0,0 +1,97 @@
+# a collection of just commands to wrap various docs.rs CLI commands,
+# and run them in a one-off docker container.
+# _Which_ container depends on the command itself and its dependencies.
+# Most service containers have their corresponding CLI container:
+# * web -> cli
+# * builder-x -> builder-cli
+# * registry-watcher -> registry-watcher-cli
+
+# low-level helper to run any CLI command in its own one-off docker container,
+# ensuring that `db` and `s3` are running.
+_cli service_name *args: _touch-docker-env _ensure_db_and_s3_are_running
+    # dependencies in the docker-compose file are ignored
+    # when running a one-off service with `docker compose run`.
+    # Instead we explicitly start any dependent services first via
+    # `_ensure_db_and_s3_are_running`.
+
+    docker compose run --build --rm {{ service_name }} {{ args }}
+
+# run any CLI command in its own one-off `cli` docker container. Args are passed to the container.
+# Only for commands that just need `db` and `s3` and minimal system dependencies.
+[group('cli')]
+cli +args: _touch-docker-env cli-db-migrate
+    just _cli cli {{ args }}
+
+# Initialize the `docs.rs` database
+[group('cli')]
+[group('database')]
+cli-db-migrate:
+    # intentionally not using the `cli` recipe, because it has a dependency on `cli-db-migrate`.
+    # Otherwise we would have a stack overflow / infinite recursion.
+    #
+    # TODO: potential optimization: only run the container when we have to
+    # run migrations?
+    just _cli cli database migrate
+
+# add a release to the build queue
+[group('cli')]
+[group('queue')]
+cli-queue-add crate_name crate_version:
+    # only does things with the database, so can use the lightweight `cli` container.
+    just cli queue add {{ crate_name }} {{ crate_version }}
+
+# run builder CLI command in its own one-off `build-server` docker container.
+# Uses a separate builder-cli container & workspace that doesn't conflict
+# with the continuously running build-servers.
+[group('cli')]
+[group('builder')]
+cli-build +args: _touch-docker-env cli-db-migrate
+    just _cli builder-cli {{ args }}
+
+# set the nightly rust version to be used for builds. Format: `nightly-YYYY-MM-DD`
+# or just `nightly` for always using the latest nightly.
+[group('cli')] +[group('builder')] +cli-build-set-toolchain name only_first_time="false": + #!/usr/bin/env bash + set -euo pipefail + + FLAG="" + if [ "{{only_first_time}}" = "true" ]; then FLAG="--only-first-time"; fi + just cli-build build set-toolchain {{ name }} $FLAG + +# update the toolchain in the builders +[group('cli')] +[group('builder')] +cli-build-update-toolchain: + just cli-build build update-toolchain + +# build & upload toolchain shared static resources +[group('cli')] +[group('builder')] +cli-build-add-essential-files: + just cli-build build add-essential-files + +# build a release +[group('cli')] +[group('builder')] +cli-build-crate name version: + just cli-build build crate {{ name }} {{ version }} + +# run registry-watcher CLI command in its own one-off `registry-watcher` docker container. +[group('cli')] +[group('watcher')] +cli-watcher +args: _touch-docker-env cli-db-migrate + just _cli registry-watcher-cli {{ args }} + +# Update last seen reference to the given hash, or the current `HEAD`. +[group('cli')] +[group('queue')] +cli-queue-reset-last-seen-ref ref="--head": + just cli-watcher queue set-last-seen-reference {{ ref }} + +# find differences between crates.io and our own database, and fix them on our side. +[group('cli')] +[group('database')] +cli-db-synchronize *args: + just cli-watcher database synchronize {{ args }} diff --git a/justfiles/services.just b/justfiles/services.just new file mode 100644 index 000000000..4bbbe29a8 --- /dev/null +++ b/justfiles/services.just @@ -0,0 +1,64 @@ +# run migrations, then launch one or more docker compose profiles in the background +[group('compose')] +compose-up *profiles: _touch-docker-env cli-db-migrate + docker compose {{ prepend("--profile ", profiles) }} up --build -d --wait --remove-orphans + +# Launch web server in the background +[group('compose')] +compose-up-web: + just compose-up web + +# Launch two build servers in the background +[group('compose')] +[group('builder')] +compose-up-builder: + just compose-up builder + +# Launch registry watcher in the background +[group('compose')] +[group('watcher')] +compose-up-watcher: + just compose-up watcher + +# Launch prometheus server in the background +[group('compose')] +compose-up-metrics: + just compose-up metrics + +# Launch everything, all at once, in the background +[group('compose')] +compose-up-full: + just compose-up full + +# Shutdown docker services, keep containers & volumes alive. +[group('compose')] +compose-down: + docker compose --profile full --profile manual down --remove-orphans + +# Shutdown docker services, then clean up docker images, volumes & other local artifacts from this docker-compose project +[group('compose')] +compose-down-and-wipe: + #!/usr/bin/env bash + set -euo pipefail + + docker compose --profile full --profile manual down --volumes --remove-orphans --rmi local + + # When testing this in CI, I had permission issues when trying to remove this folder. + # Likely it's related to the docker container runnning as a different (root?) user, so + # these files in the `ignored/` folder belong to `root`. + + # so we just try if we can use passwordless `sudo`: + if sudo -n true 2>/dev/null; then + echo "deleting ignored/ folder with sudo" + sudo -n rm -rf ignored/ + else + echo "trying to delete ignored/ folder with current user." + rm -rf ignored/ || { echo 'Failed to remove ignored/ - skipping' >&2; } + fi + + mkdir -p ignored + +# stream logs from all services running in docker-compose. Optionally specify services to tail logs from. 
+[group('compose')] +compose-logs *services: + docker compose --profile full logs -f {{ services }} diff --git a/justfiles/testing.just b/justfiles/testing.just new file mode 100644 index 000000000..bca7e24eb --- /dev/null +++ b/justfiles/testing.just @@ -0,0 +1,117 @@ +# just commands for CI & local development + +[group('testing')] +[group('sqlx')] +sqlx-prepare *args: _ensure_db_and_s3_are_running + cargo sqlx prepare \ + --database-url $DOCSRS_DATABASE_URL \ + --workspace {{ args }} \ + -- --all-targets --all-features + +[group('testing')] +[group('sqlx')] +sqlx-check: + just sqlx-prepare --check + +[group('testing')] +[group('sqlx')] +sqlx-migrate-run: _ensure_db_and_s3_are_running + cargo sqlx migrate run --database-url $DOCSRS_DATABASE_URL + +[group('testing')] +[group('sqlx')] +sqlx-migrate-revert target="0": _ensure_db_and_s3_are_running + # --target 0 means "revert everything" + cargo sqlx migrate revert \ + --database-url $DOCSRS_DATABASE_URL \ + --target-version {{ target }} + + +# Format the code using `cargo fmt`. +[group('testing')] +format: + rustup component add rustfmt + # like this we get both the non-zero exit code, and the local code is + # formatted. + cargo fmt --all -- --check || { cargo fmt --all && exit 1; } + +# run clippy, in our config +[group('testing')] +clippy *args: + rustup component add clippy + cargo clippy \ + --all-features \ + --all-targets \ + --workspace \ + --locked \ + {{ args }} \ + -- -D warnings + +# run clippy --fix +[group('testing')] +clippy-fix: + just clippy --fix --allow-dirty --allow-staged + +# run all linters, for local development & CI +[group('testing')] +lint: format + #!/usr/bin/env bash + set -euo pipefail + + if [ "${GITHUB_ACTIONS:-}" = "true" ]; then + just clippy + else + just clippy-fix + fi + + +[group('testing')] +lint-js *args: + deno run -A npm:eslint@9 static templates gui-tests eslint.config.js {{ args }} + +[group('testing')] +run-gui-tests: _ensure_db_and_s3_are_running cli-db-migrate compose-up-web + just cli-build-update-toolchain + just cli-build-crate sysinfo 0.23.4 + just cli-build-crate sysinfo 0.23.5 + just cli-build-crate libtest 0.0.1 + just cli-build-add-essential-files + + just _cli gui_tests + +_build-test-binaries: + #!/usr/bin/env bash + set -euo pipefail + + export SQLX_OFFLINE=1 + cargo test --no-run --workspace --locked + +[group('testing')] +run-tests: _ensure_db_and_s3_are_running _build-test-binaries + #!/usr/bin/env bash + set -euo pipefail + + export DOCSRS_INCLUDE_DEFAULT_TARGETS=false + export DOCSRS_LOG=docs_rs=debug,rustwide=info + export RUST_BACKTRACE=1 + export DOCSRS_PREFIX=ignored/cratesfyi-prefix + export DOCSRS_DATABASE_URL=postgresql://cratesfyi:password@localhost:15432 + export AWS_ACCESS_KEY_ID=cratesfyi + export AWS_SECRET_ACCESS_KEY=secret_key + export S3_ENDPOINT=http://localhost:9000 + + cargo test --workspace --locked --no-fail-fast + +[group('testing')] +[group('builder')] +run-builder-tests: _ensure_db_and_s3_are_running + #!/usr/bin/env bash + set -euo pipefail + + export DOCSRS_INCLUDE_DEFAULT_TARGETS=true + export DOCSRS_DOCKER_IMAGE=ghcr.io/rust-lang/crates-build-env/linux-micro + export DOCSRS_LOG=docs_rs=debug,rustwide=info + export RUST_BACKTRACE=1 + export DOCSRS_PREFIX=ignored/cratesfyi-prefix + + cargo test --locked -- --ignored --test-threads=1 diff --git a/justfiles/utils.just b/justfiles/utils.just new file mode 100644 index 000000000..866f6a677 --- /dev/null +++ b/justfiles/utils.just @@ -0,0 +1,9 @@ + +_ensure_db_and_s3_are_running: _touch-docker-env + # 
dependencies in the docker-cli file are ignored + # here. Instead we explicitly start any dependent services first. + docker compose up -d db s3 --wait + +_touch-docker-env: + touch .docker.env + diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index dc2a63fd4..0eeefd036 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -1,7 +1,7 @@ use anyhow::{Context as _, Result, anyhow}; use clap::{Parser, Subcommand, ValueEnum}; use docs_rs::{ - Config, Context, PackageKind, RustwideBuilder, + Config, Context, Index, PackageKind, RustwideBuilder, db::{self, CrateId, Overrides, ReleaseId, add_path_into_database, types::version::Version}, start_background_metrics_webserver, start_web_server, utils::{ @@ -202,7 +202,8 @@ impl CommandLine { start_background_metrics_webserver(Some(metric_server_socket_addr), &ctx)?; ctx.runtime.block_on(async move { - docs_rs::utils::watch_registry(&ctx.async_build_queue, &ctx.config, ctx.index) + let index = Index::from_config(&ctx.config).await?; + docs_rs::utils::watch_registry(&ctx.async_build_queue, &ctx.config, &index) .await })?; } @@ -298,7 +299,10 @@ impl QueueSubcommand { (Some(reference), false) => reference, (None, true) => { println!("Fetching changes to set reference to HEAD"); - ctx.runtime.block_on(ctx.index.latest_commit_reference())? + ctx.runtime.block_on(async move { + let index = Index::from_config(&ctx.config).await?; + index.latest_commit_reference().await + })? } (_, _) => unreachable!(), }; @@ -435,6 +439,8 @@ impl BuildSubcommand { } => { let mut builder = rustwide_builder()?; + builder.update_toolchain_and_add_essential_files()?; + if let Some(path) = local { builder .build_local_package(&path) @@ -473,17 +479,7 @@ impl BuildSubcommand { return Ok(()); } - rustwide_builder()? - .update_toolchain() - .context("failed to update toolchain")?; - - rustwide_builder()? - .purge_caches() - .context("failed to purge caches")?; - - rustwide_builder()? - .add_essential_files() - .context("failed to add essential files")?; + rustwide_builder()?.update_toolchain_and_add_essential_files()?; } Self::AddEssentialFiles => { diff --git a/src/build_queue.rs b/src/build_queue.rs index d20213712..dafc84d1b 100644 --- a/src/build_queue.rs +++ b/src/build_queue.rs @@ -15,7 +15,7 @@ use futures_util::{StreamExt, stream::TryStreamExt}; use sqlx::Connection as _; use std::{collections::HashMap, sync::Arc}; use tokio::runtime; -use tracing::{debug, error, info, instrument}; +use tracing::{debug, error, info, instrument, warn}; /// The static priority for background rebuilds. /// Used when queueing rebuilds, and when rendering them @@ -234,10 +234,18 @@ impl AsyncBuildQueue { /// /// Returns the number of crates added pub async fn get_new_crates(&self, index: &Index) -> Result { - let last_seen_reference = self - .last_seen_reference() - .await? - .context("no last_seen_reference set in database")?; + let last_seen_reference = self.last_seen_reference().await?; + let last_seen_reference = if let Some(oid) = last_seen_reference { + oid + } else { + warn!( + "no last-seen reference found in our database. We assume a fresh install and + set the latest reference (HEAD) as last. This means we will then start to queue + builds for new releases only from now on, and not for all existing releases." + ); + index.latest_commit_reference().await? 
+ }; + index.set_last_seen_reference(last_seen_reference).await?; let (changes, new_reference) = index.peek_changes_ordered().await?; @@ -596,35 +604,6 @@ impl BuildQueue { Ok(()) } - fn update_toolchain(&self, builder: &mut RustwideBuilder) -> Result<()> { - let updated = retry( - || { - builder - .update_toolchain() - .context("downloading new toolchain failed") - }, - 3, - )?; - - if updated { - // toolchain has changed, purge caches - retry( - || { - builder - .purge_caches() - .context("purging rustwide caches failed") - }, - 3, - )?; - - builder - .add_essential_files() - .context("adding essential files failed")?; - } - - Ok(()) - } - /// Builds the top package from the queue. Returns whether there was a package in the queue. /// /// Note that this will return `Ok(true)` even if the package failed to build. @@ -657,8 +636,8 @@ impl BuildQueue { return Err(err); } - if let Err(err) = self - .update_toolchain(&mut *builder) + if let Err(err) = builder + .update_toolchain_and_add_essential_files() .context("Updating toolchain failed, locking queue") { report_error(&err); diff --git a/src/context.rs b/src/context.rs index c87b3d293..631d3e583 100644 --- a/src/context.rs +++ b/src/context.rs @@ -2,7 +2,7 @@ use crate::cdn::CdnBackend; use crate::db::Pool; use crate::repositories::RepositoryStatsUpdater; use crate::{ - AsyncBuildQueue, AsyncStorage, BuildQueue, Config, Index, InstanceMetrics, RegistryApi, + AsyncBuildQueue, AsyncStorage, BuildQueue, Config, InstanceMetrics, RegistryApi, ServiceMetrics, Storage, }; use anyhow::Result; @@ -19,7 +19,6 @@ pub struct Context { pub pool: Pool, pub service_metrics: Arc, pub instance_metrics: Arc, - pub index: Arc, pub registry_api: Arc, pub repository_stats_updater: Arc, pub runtime: runtime::Handle, @@ -67,10 +66,6 @@ impl Context { let cdn = Arc::new(CdnBackend::new(&config).await); - let index = Arc::new( - Index::from_url(&config.registry_index_path, config.registry_url.as_deref()).await?, - ); - let runtime = runtime::Handle::current(); // sync wrappers around build-queue & storage async resources let build_queue = Arc::new(BuildQueue::new(runtime.clone(), async_build_queue.clone())); @@ -85,7 +80,6 @@ impl Context { pool: pool.clone(), service_metrics: Arc::new(ServiceMetrics::new()?), instance_metrics, - index, registry_api: Arc::new(RegistryApi::new( config.registry_api_host.clone(), config.crates_io_api_call_retries, diff --git a/src/docbuilder/rustwide_builder.rs b/src/docbuilder/rustwide_builder.rs index d02880a96..83e2f2639 100644 --- a/src/docbuilder/rustwide_builder.rs +++ b/src/docbuilder/rustwide_builder.rs @@ -18,7 +18,7 @@ use crate::{ }, utils::{ CargoMetadata, ConfigName, MetadataPackage, copy_dir_all, get_config, parse_rustc_version, - report_error, set_config, + report_error, retry, set_config, }, }; use anyhow::{Context as _, Error, anyhow, bail}; @@ -173,11 +173,46 @@ impl RustwideBuilder { Ok(()) } - pub fn update_toolchain(&mut self) -> Result { + #[instrument(skip_all)] + pub fn update_toolchain_and_add_essential_files(&mut self) -> Result<()> { + info!("try updating the toolchain"); + let updated = retry( + || { + self.update_toolchain() + .context("downloading new toolchain failed") + }, + 3, + )?; + + debug!(updated, "toolchain update check complete"); + + if updated { + // toolchain has changed, purge caches + retry( + || { + self.purge_caches() + .context("purging rustwide caches failed") + }, + 3, + )?; + + self.add_essential_files() + .context("adding essential files failed")?; + } + + Ok(()) + } + + 
#[instrument(skip_all)]
+    fn update_toolchain(&mut self) -> Result<bool> {
         self.toolchain = self.runtime.block_on(async {
             let mut conn = self.db.get_async().await?;
             get_configured_toolchain(&mut conn).await
         })?;
+        debug!(
+            configured_toolchain = self.toolchain.to_string(),
+            "configured toolchain"
+        );
 
         // For CI builds, a lot of the normal update_toolchain things don't apply.
         // CI builds are only for one platform (https://forge.rust-lang.org/infra/docs/rustc-ci.html#try-builds)
@@ -246,7 +281,72 @@
             }
         }
 
-        let has_changed = old_version != Some(self.rustc_version()?);
+        let new_version = self.rustc_version()?;
+        debug!(new_version, "detected new rustc version");
+        let mut has_changed = old_version.as_ref() != Some(&new_version);
+
+        if !has_changed {
+            // This fixes an edge-case on a fresh build server.
+            //
+            // It seems like on the fresh server, there _is_ a recent nightly toolchain
+            // installed. In this case, this method will just install necessary components and
+            // doc-targets/platforms.
+            //
+            // But: *for this local old toolchain, we never ran `add_essential_files`*, because it
+            // was not installed by us.
+            //
+            // Now the culprit: even though we "fix" the previously installed nightly toolchain
+            // with the needed components & targets, we return "updated = false", since the
+            // version number didn't change.
+            //
+            // As a result, `BuildQueue::update_toolchain` will not call `add_essential_files`,
+            // which then means we don't have the toolchain-shared static files on our S3 bucket.
+            //
+            // The workaround specifically for `add_essential_files` is the following:
+            //
+            // After `add_essential_files` is finished, it sets `ConfigName::RustcVersion` in the
+            // config database to the rustc version it uploaded the essential files for.
+            //
+            // This means, if `ConfigName::RustcVersion` is empty or different from the current
+            // new version, we can set `updated = true` too.
+            //
+            // I feel like there are more edge-cases, but for now this is OK.
+            //
+            // An alternative would have been to run `build update-toolchain --only-first-time`
+            // in a newly created `ENTRYPOINT` script for the build-server. This is how it was
+            // done in the previous (one-dockerfile-and-process-for-everything) approach.
+            // The `entrypoint.sh` script did call `add-essential-files --only-first-time`.
+            //
+            // Problem with that approach: it postpones the boot process of the
+            // build-server, where docker and later the infra will try to check with an HTTP
+            // endpoint to see if the build server is ready.
+            //
+            // So I leaned towards a more self-contained solution which doesn't need docker
+            // at all, and also would work if you run the build-server directly on your machine.
+            //
+            // Fixing it here also means the startup of the actual build-server, including its
+            // metrics collection endpoints, isn't delayed. Generally it shouldn't make a
+            // difference how much time is needed on a fresh build-server between picking the
+            // release up from the queue and actually starting to build the release. In the old
+            // solution, the entrypoint would do the toolchain-update & add-essential files
+            // before even starting the build-server; now we're roughly doing the same thing
+            // inside the main builder loop.
+ + let rustc_version = self.runtime.block_on({ + let pool = self.db.clone(); + async move { + let mut conn = pool + .get_async() + .await + .context("failed to get a database connection")?; + + get_config::(&mut conn, ConfigName::RustcVersion).await + } + })?; + + has_changed = rustc_version.is_none() || rustc_version != Some(new_version); + } + Ok(has_changed) } diff --git a/src/index.rs b/src/index.rs index 133c42a07..01addd16c 100644 --- a/src/index.rs +++ b/src/index.rs @@ -1,4 +1,5 @@ use crate::{ + Config, error::Result, utils::{report_error, run_blocking}, }; @@ -24,6 +25,10 @@ pub struct Index { } impl Index { + pub async fn from_config(config: &Config) -> Result { + Index::from_url(&config.registry_index_path, config.registry_url.as_deref()).await + } + pub async fn from_url( path: impl AsRef, repository_url: Option>, diff --git a/src/lib.rs b/src/lib.rs index 32ab281e0..fc9d1423f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,8 +56,21 @@ pub(crate) static GLOBAL_ALERT: Option = Some(GlobalAlert { /// commit hash and build date pub const BUILD_VERSION: &str = concat!( env!("CARGO_PKG_VERSION"), + " (", + env!("GIT_SHA"), " ", - include_str!(concat!(env!("OUT_DIR"), "/git_version")) + env!("BUILD_DATE"), + " )" +); + +pub const APP_USER_AGENT: &str = concat!( + env!("CARGO_PKG_NAME"), + " ", + " (", + env!("GIT_SHA"), + " ", + env!("BUILD_DATE"), + " )" ); /// Where rustdoc's static files are stored in S3. diff --git a/src/registry_api.rs b/src/registry_api.rs index 0631d7809..da6a018fe 100644 --- a/src/registry_api.rs +++ b/src/registry_api.rs @@ -1,4 +1,4 @@ -use crate::{db::types::version::Version, error::Result, utils::retry_async}; +use crate::{APP_USER_AGENT, db::types::version::Version, error::Result, utils::retry_async}; use anyhow::{Context, anyhow, bail}; use chrono::{DateTime, Utc}; use reqwest::header::{ACCEPT, HeaderValue, USER_AGENT}; @@ -7,12 +7,6 @@ use std::fmt; use tracing::instrument; use url::Url; -const APP_USER_AGENT: &str = concat!( - env!("CARGO_PKG_NAME"), - " ", - include_str!(concat!(env!("OUT_DIR"), "/git_version")) -); - #[derive(Debug)] pub struct RegistryApi { api_base: Url, diff --git a/src/repositories/github.rs b/src/repositories/github.rs index 986ac2c84..e37a73a20 100644 --- a/src/repositories/github.rs +++ b/src/repositories/github.rs @@ -9,9 +9,11 @@ use reqwest::{ use serde::Deserialize; use tracing::{trace, warn}; -use crate::repositories::{ - APP_USER_AGENT, FetchRepositoriesResult, RateLimitReached, Repository, RepositoryForge, - RepositoryName, +use crate::{ + APP_USER_AGENT, + repositories::{ + FetchRepositoriesResult, RateLimitReached, Repository, RepositoryForge, RepositoryName, + }, }; const GRAPHQL_UPDATE: &str = "query($ids: [ID!]!) { diff --git a/src/repositories/gitlab.rs b/src/repositories/gitlab.rs index c1fb70b95..c09c7c280 100644 --- a/src/repositories/gitlab.rs +++ b/src/repositories/gitlab.rs @@ -10,9 +10,11 @@ use std::collections::HashSet; use std::str::FromStr; use tracing::warn; -use crate::repositories::{ - APP_USER_AGENT, FetchRepositoriesResult, RateLimitReached, Repository, RepositoryForge, - RepositoryName, +use crate::{ + APP_USER_AGENT, + repositories::{ + FetchRepositoriesResult, RateLimitReached, Repository, RepositoryForge, RepositoryName, + }, }; const GRAPHQL_UPDATE: &str = "query($ids: [ID!]!) 
{ diff --git a/src/repositories/mod.rs b/src/repositories/mod.rs index 9a6932390..2376e33cf 100644 --- a/src/repositories/mod.rs +++ b/src/repositories/mod.rs @@ -5,12 +5,6 @@ pub use self::updater::{ FetchRepositoriesResult, Repository, RepositoryForge, RepositoryStatsUpdater, }; -pub const APP_USER_AGENT: &str = concat!( - env!("CARGO_PKG_NAME"), - " ", - include_str!(concat!(env!("OUT_DIR"), "/git_version")) -); - #[derive(Debug, thiserror::Error)] #[error("rate limit reached")] struct RateLimitReached; diff --git a/src/utils/daemon.rs b/src/utils/daemon.rs index afdaf02f9..ef4bf73c1 100644 --- a/src/utils/daemon.rs +++ b/src/utils/daemon.rs @@ -21,7 +21,7 @@ use tracing::{debug, info}; pub async fn watch_registry( build_queue: &AsyncBuildQueue, config: &Config, - index: Arc, + index: &Index, ) -> Result<(), Error> { let mut last_gc = Instant::now(); @@ -31,7 +31,7 @@ pub async fn watch_registry( } else { debug!("Checking new crates"); match build_queue - .get_new_crates(&index) + .get_new_crates(index) .await .context("Failed to get new crates") { @@ -51,13 +51,13 @@ pub async fn watch_registry( fn start_registry_watcher(context: &Context) -> Result<(), Error> { let build_queue = context.async_build_queue.clone(); let config = context.config.clone(); - let index = context.index.clone(); context.runtime.spawn(async move { // space this out to prevent it from clashing against the queue-builder thread on launch tokio::time::sleep(Duration::from_secs(30)).await; - watch_registry(&build_queue, &config, index).await + let index = Index::from_config(&config).await?; + watch_registry(&build_queue, &config, &index).await }); Ok(()) diff --git a/src/utils/queue_builder.rs b/src/utils/queue_builder.rs index 8a1d90502..292cedfc0 100644 --- a/src/utils/queue_builder.rs +++ b/src/utils/queue_builder.rs @@ -6,10 +6,13 @@ use std::time::Duration; use std::{fs, io, path::Path, thread}; use tracing::{debug, error, warn}; +/// the main build-server loop pub fn queue_builder(context: &Context, mut builder: RustwideBuilder) -> Result<(), Error> { loop { let temp_dir = &context.config.temp_dir; - if let Err(e) = remove_tempdirs(temp_dir) { + if temp_dir.exists() + && let Err(e) = remove_tempdirs(temp_dir) + { report_error(&anyhow::anyhow!(e).context(format!( "failed to clean temporary directory {:?}", temp_dir