From dad1c2e45c920acb021632d058b043519b4d5bfd Mon Sep 17 00:00:00 2001
From: gpromote <30365806+GuoMonth@users.noreply.github.com>
Date: Wed, 21 Aug 2024 10:32:11 +0800
Subject: [PATCH] Update generate_completion_from_local_llm return type

Update generate_completion_from_local_llm return type
---
 llm_aided_ocr.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/llm_aided_ocr.py b/llm_aided_ocr.py
index ea2c673..f7e792f 100644
--- a/llm_aided_ocr.py
+++ b/llm_aided_ocr.py
@@ -346,7 +346,7 @@ async def generate_completion_from_openai(prompt: str, max_tokens: int = 5000) -
         logging.error(f"An error occurred while requesting from OpenAI API: {e}")
         return None
 
-async def generate_completion_from_local_llm(llm_model_name: str, input_prompt: str, number_of_tokens_to_generate: int = 100, temperature: float = 0.7, grammar_file_string: str = None):
+async def generate_completion_from_local_llm(llm_model_name: str, input_prompt: str, number_of_tokens_to_generate: int = 100, temperature: float = 0.7, grammar_file_string: str = None) -> Optional[str]:
     logging.info(f"Starting text completion using model: '{llm_model_name}' for input prompt: '{input_prompt}'")
     llm = load_model(llm_model_name)
     prompt_tokens = estimate_tokens(input_prompt, llm_model_name)
@@ -396,11 +396,7 @@ async def generate_completion_from_local_llm(llm_model_name: str, input_prompt:
     finish_reason = str(output['choices'][0]['finish_reason'])
     llm_model_usage_json = json.dumps(output['usage'])
     logging.info(f"Completed text completion in {output['usage']['total_time']:.2f} seconds. Beginning of generated text: \n'{generated_text[:150]}'...")
-    return {
-        "generated_text": generated_text,
-        "finish_reason": finish_reason,
-        "llm_model_usage_json": llm_model_usage_json
-    }
+    return generated_text
 
 # Image Processing Functions
 def preprocess_image(image):
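
Note: this patch changes the function's return value from a dict to a plain
Optional[str], so any caller that previously unpacked the dict (e.g. via
result["generated_text"]) must be updated. Below is a minimal, self-contained
sketch of the adjustment. The stub body, the call-site name correct_ocr_chunk,
the model name, and the prompt are hypothetical illustrations, not code from
this repository.

import asyncio
from typing import Optional

# Stand-in for the patched function so this sketch runs on its own; the real
# implementation in llm_aided_ocr.py loads and queries a local model.
async def generate_completion_from_local_llm(
    llm_model_name: str, input_prompt: str
) -> Optional[str]:
    return f"corrected text from {llm_model_name}"

# Hypothetical call site showing the before/after usage.
async def correct_ocr_chunk(chunk: str) -> str:
    # Before this patch the function returned a dict:
    #     result = await generate_completion_from_local_llm(model, prompt)
    #     corrected = result["generated_text"]
    # After this patch it returns the generated text directly (Optional[str]),
    # so the key lookup must be dropped and a None result handled explicitly.
    corrected = await generate_completion_from_local_llm(
        "llama-3-8b-instruct",  # assumed model name
        f"Fix the OCR errors in the following text:\n{chunk}",
    )
    # Fall back to the raw chunk if the model produced nothing.
    return corrected if corrected is not None else chunk

if __name__ == "__main__":
    print(asyncio.run(correct_ocr_chunk("Th1s 1s 0CR text")))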