Replies: 1 comment
-
两条pipeline部署在一台服务器上是支持的。这个错误看起来可能和产线配置有关,请提供具体的产线配置。
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
-
如题。
from paddlex import create_pipeline
ocr_pipeline = create_pipeline(
pipeline="./OCR.yaml"
)
from paddlex import create_pipeline
seal_pipeline = create_pipeline(
pipeline="./seal_recognition.yaml"
)
环境
python= 3.10
paddlepaddle-gpu==3.0.0
paddleocr==2.10.0
paddlex==3.0.0
单独 一条 pipeline 都是可以的,放在一起报错。
日志
Initializing PaddleX pipeline...
PaddleX pipeline initialized
Initializing PaddleX seal pipeline...
[2025-07-03 06:04:38] [gunicorn.error] [ERROR] Exception in worker process
Traceback (most recent call last):
File "/usr/local/lib/python3.10/site-packages/gunicorn/arbiter.py", line 608, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.10/site-packages/gunicorn/workers/ggevent.py", line 146, in init_process
super().init_process()
File "/usr/local/lib/python3.10/site-packages/gunicorn/workers/base.py", line 135, in init_process
self.load_wsgi()
File "/usr/local/lib/python3.10/site-packages/gunicorn/workers/base.py", line 147, in load_wsgi
self.wsgi = self.app.wsgi()
File "/usr/local/lib/python3.10/site-packages/gunicorn/app/base.py", line 66, in wsgi
self.callable = self.load()
File "/usr/local/lib/python3.10/site-packages/gunicorn/app/wsgiapp.py", line 57, in load
return self.load_wsgiapp()
File "/usr/local/lib/python3.10/site-packages/gunicorn/app/wsgiapp.py", line 47, in load_wsgiapp
return util.import_app(self.app_uri)
File "/usr/local/lib/python3.10/site-packages/gunicorn/util.py", line 370, in import_app
mod = importlib.import_module(module)
File "/usr/local/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/opt/app/wsgi.py", line 1, in <module>
from paddle_proxy.web import app
File "/opt/app/paddle_proxy/web.py", line 10, in <module>
from paddle_proxy.services import ocr_from_base64 as ocr_from_base64
File "/opt/app/paddle_proxy/services.py", line 40, in <module>
init_seal_pipeline()
File "/opt/app/paddle_proxy/seal_rec_service.py", line 19, in init_seal_pipeline
pipeline = create_pipeline(pipeline=PIPELINE_CONFIG_PATH, device='gpu')
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/pipelines/__init__.py", line 165, in create_pipeline
pipeline = BasePipeline.get(pipeline_name)(
File "/usr/local/lib/python3.10/site-packages/paddlex/utils/deps.py", line 195, in _wrapper
return old_init_func(self, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/pipelines/_parallel.py", line 103, in __init__
self._pipeline = self._create_internal_pipeline(config, self.device)
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/pipelines/_parallel.py", line 158, in _create_internal_pipeline
return self._pipeline_cls(
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/pipelines/seal_recognition/pipeline.py", line 95, in __init__
self.layout_det_model = self.create_model(
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/__init__.py", line 77, in create_predictor
return BasePredictor.get(model_name)(
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/object_detection/predictor.py", line 112, in __init__
self.pre_ops, self.infer, self.post_op = self._build()
self.pre_ops, self.infer, self.post_op = self._build()
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/object_detection/predictor.py", line 143, in _build
infer = self.create_static_infer()
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/base/predictor/base_predictor.py", line 242, in create_static_infer
return HPInfer(
File "/usr/local/lib/python3.10/site-packages/paddlex/utils/deps.py", line 148, in _wrapper
return old_init_func(self, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/common/static_infer.py", line 574, in __init__
backend, backend_config = self._determine_backend_and_config()
File "/usr/local/lib/python3.10/site-packages/paddlex/inference/models/common/static_infer.py", line 629, in _determine_backend_and_config
raise RuntimeError(
RuntimeError: No inference backend and configuration could be suggested. Reason: 'tensorrt' is not a supported inference backend.
Beta Was this translation helpful? Give feedback.
All reactions