Skip to content

Install inference-cli before forcing numpy to prevent overwriting with numpy 2 #1441

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 8 additions & 8 deletions docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1
Original file line number Diff line number Diff line change
Expand Up @@ -57,25 +57,25 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \
--upgrade \
&& rm -rf ~/.cache/pip

# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime
RUN python3.9 -m pip uninstall --yes onnxruntime numpy
RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl
RUN python3.9 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl

WORKDIR /app/
COPY inference inference
COPY docker/config/gpu_http.py gpu_http.py
COPY inference_cli inference_cli
COPY inference_sdk inference_sdk
# NOTE(review): gpu_http.py is COPY'd twice (here and 3 lines up). If both lines
# survive in the final file this second copy is redundant (same src/dest, last
# one wins) — confirm against the merged Dockerfile; it may be a diff-move artifact.
COPY docker/config/gpu_http.py gpu_http.py
COPY .release .release
COPY requirements requirements
COPY Makefile Makefile

RUN make create_inference_cli_whl PYTHON=python3.9
RUN python3.9 -m pip install dist/inference_cli*.whl

# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime
RUN python3.9 -m pip uninstall --yes onnxruntime numpy
RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl
RUN python3.9 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl

ENV VERSION_CHECK_MODE=continuous \
PROJECT=roboflow-platform \
ORT_TENSORRT_FP16_ENABLE=1 \
Expand Down
13 changes: 7 additions & 6 deletions docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager
Original file line number Diff line number Diff line change
Expand Up @@ -48,12 +48,6 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \
--upgrade \
&& rm -rf ~/.cache/pip

RUN python3.9 -m pip uninstall --yes onnxruntime
RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl
RUN python3.9 -m pip install onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl

WORKDIR /app/
COPY inference_cli/ ./inference_cli/
COPY inference_sdk/ ./inference_sdk/
Expand All @@ -65,6 +59,13 @@ COPY Makefile Makefile
RUN make create_inference_cli_whl PYTHON=python3.9
RUN python3.9 -m pip install dist/inference_cli*.whl

# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime
RUN python3.9 -m pip uninstall --yes onnxruntime
RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl
RUN python3.9 -m pip install onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl

ENV ORT_TENSORRT_FP16_ENABLE=1 \
ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \
CORE_MODEL_SAM_ENABLED=False \
Expand Down
14 changes: 7 additions & 7 deletions docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,6 @@ RUN python3 -m pip install --upgrade pip && \
-r requirements/requirements.jetson.txt \
"setuptools<=75.5.0"

RUN wget https://nvidia.box.com/shared/static/6l0u97rj80ifwkk8rqbzj1try89fk26z.whl -O onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl
RUN python3 -m pip uninstall -y numpy && python3 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl

# Set up the application runtime
WORKDIR /app
COPY inference/ ./inference/
Expand All @@ -55,6 +50,12 @@ COPY Makefile Makefile
RUN make create_inference_cli_whl PYTHON=python3
RUN pip3 install dist/inference_cli*.whl

# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime
RUN wget https://nvidia.box.com/shared/static/6l0u97rj80ifwkk8rqbzj1try89fk26z.whl -O onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl
RUN python3 -m pip uninstall -y numpy && python3 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \
&& rm -rf ~/.cache/pip \
&& rm onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl

# Set environment variables
ENV VERSION_CHECK_MODE=continuous \
PROJECT=roboflow-platform \
Expand All @@ -73,8 +74,7 @@ ENV VERSION_CHECK_MODE=continuous \
RUNS_ON_JETSON=True \
ENABLE_PROMETHEUS=True \
ENABLE_STREAM_API=True \
STREAM_API_PRELOADED_PROCESSES=2 \
PYTHONPATH=/app:$PYTHONPATH
STREAM_API_PRELOADED_PROCESSES=2

# Expose the application port
EXPOSE 9001
Expand Down
5 changes: 2 additions & 3 deletions docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ RUN python3 -m pip install --upgrade pip && \
-r requirements/requirements.gpu.txt \
"setuptools<=75.5.0" \
--extra-index-url https://pypi.jetson-ai-lab.dev/jp6/cu126
RUN python3 -m pip install "numpy<=1.26.4"
# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime.
# Fetch the Jetson onnxruntime-gpu wheel (same artifact as Dockerfile.onnx.jetson.6.0.0)
# in the same layer that installs it, and clean pip cache + wheel so the layer stays small.
RUN wget https://nvidia.box.com/shared/static/6l0u97rj80ifwkk8rqbzj1try89fk26z.whl -O onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \
    && python3 -m pip uninstall -y numpy \
    && python3 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \
    && rm -rf ~/.cache/pip \
    && rm onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl

# Set up the application runtime
WORKDIR /app
Expand Down Expand Up @@ -78,8 +78,7 @@ ENV VERSION_CHECK_MODE=continuous \
RUNS_ON_JETSON=True \
ENABLE_PROMETHEUS=True \
ENABLE_STREAM_API=True \
STREAM_API_PRELOADED_PROCESSES=2 \
PYTHONPATH=/app:$PYTHONPATH
STREAM_API_PRELOADED_PROCESSES=2

# Expose the application port
EXPOSE 9001
Expand Down