diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 index 695d1d1ff3..5cbf62d175 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 @@ -57,18 +57,11 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \ --upgrade \ && rm -rf ~/.cache/pip -# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime -RUN python3.9 -m pip uninstall --yes onnxruntime numpy -RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl -RUN python3.9 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \ - && rm -rf ~/.cache/pip \ - && rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl - WORKDIR /app/ COPY inference inference +COPY docker/config/gpu_http.py gpu_http.py COPY inference_cli inference_cli COPY inference_sdk inference_sdk -COPY docker/config/gpu_http.py gpu_http.py COPY .release .release COPY requirements requirements COPY Makefile Makefile @@ -76,6 +69,13 @@ COPY Makefile Makefile RUN make create_inference_cli_whl PYTHON=python3.9 RUN python3.9 -m pip install dist/inference_cli*.whl +# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime +RUN python3.9 -m pip uninstall --yes onnxruntime numpy +RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl +RUN python3.9 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \ + && rm -rf ~/.cache/pip \ + && rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl + ENV VERSION_CHECK_MODE=continuous \ PROJECT=roboflow-platform \ ORT_TENSORRT_FP16_ENABLE=1 \ diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager 
b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager index 183f1ecb0b..6ea6ed9911 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager @@ -48,12 +48,6 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \ --upgrade \ && rm -rf ~/.cache/pip -RUN python3.9 -m pip uninstall --yes onnxruntime -RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl -RUN python3.9 -m pip install onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \ - && rm -rf ~/.cache/pip \ - && rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl - WORKDIR /app/ COPY inference_cli/ ./inference_cli/ COPY inference_sdk/ ./inference_sdk/ @@ -65,6 +59,13 @@ COPY Makefile Makefile RUN make create_inference_cli_whl PYTHON=python3.9 RUN python3.9 -m pip install dist/inference_cli*.whl +# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime +RUN python3.9 -m pip uninstall --yes onnxruntime +RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl +RUN python3.9 -m pip install onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4,<=4.10.0.84" \ + && rm -rf ~/.cache/pip \ + && rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl + ENV ORT_TENSORRT_FP16_ENABLE=1 \ ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \ CORE_MODEL_SAM_ENABLED=False \ diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 829ce3c142..9b0d037837 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -37,11 +37,6 @@ RUN python3 -m pip install --upgrade pip && \ -r requirements/requirements.jetson.txt \ "setuptools<=75.5.0" -RUN wget 
https://nvidia.box.com/shared/static/6l0u97rj80ifwkk8rqbzj1try89fk26z.whl -O onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl -RUN python3 -m pip uninstall -y numpy && python3 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \ && rm -rf ~/.cache/pip \ && rm onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl - # Set up the application runtime WORKDIR /app COPY inference/ ./inference/ @@ -55,6 +50,12 @@ COPY Makefile Makefile RUN make create_inference_cli_whl PYTHON=python3 RUN pip3 install dist/inference_cli*.whl +# BE CAREFUL, WE ENFORCE numpy 1.x for the sake of compatibility with onnxruntime +RUN wget https://nvidia.box.com/shared/static/6l0u97rj80ifwkk8rqbzj1try89fk26z.whl -O onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl +RUN python3 -m pip uninstall -y numpy && python3 -m pip install "numpy<=1.26.4" onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl \ + && rm -rf ~/.cache/pip \ + && rm onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl + # Set environment variables ENV VERSION_CHECK_MODE=continuous \ PROJECT=roboflow-platform \ @@ -73,8 +74,7 @@ ENV VERSION_CHECK_MODE=continuous \ RUNS_ON_JETSON=True \ ENABLE_PROMETHEUS=True \ ENABLE_STREAM_API=True \ - STREAM_API_PRELOADED_PROCESSES=2 \ - PYTHONPATH=/app:$PYTHONPATH + STREAM_API_PRELOADED_PROCESSES=2 # Expose the application port EXPOSE 9001 diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0 index 17b23b0ea8..9de450de31 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0 @@ -45,7 +45,7 @@ RUN python3 -m pip install --upgrade pip && \ -r requirements/requirements.gpu.txt \ "setuptools<=75.5.0" \ --extra-index-url https://pypi.jetson-ai-lab.dev/jp6/cu126 -RUN python3 -m pip install "numpy<=1.26.4" +RUN python3 -m pip uninstall -y numpy && python3 -m pip install "numpy<=1.26.4" # Set
up the application runtime WORKDIR /app @@ -78,8 +78,7 @@ ENV VERSION_CHECK_MODE=continuous \ RUNS_ON_JETSON=True \ ENABLE_PROMETHEUS=True \ ENABLE_STREAM_API=True \ - STREAM_API_PRELOADED_PROCESSES=2 \ - PYTHONPATH=/app:$PYTHONPATH + STREAM_API_PRELOADED_PROCESSES=2 # Expose the application port EXPOSE 9001