diff --git a/app/api/api_prompt_generation.py b/app/api/api_prompt_generation.py
index c227b07..59e5779 100644
--- a/app/api/api_prompt_generation.py
+++ b/app/api/api_prompt_generation.py
@@ -26,7 +26,7 @@ def prompt_generation(request_data: PromptGenerationImageModel):
     """
     try:
         logger.info(f"prompt_generation request item is : @@@@@@:{request_data}")
-        data = translate_to_en(request_data.text)
+        data = translate_to_en("[" + request_data.text + "]")
         logger.info(f"prompt_generation response @@@@@@:{data}")
     except Exception as e:
         logger.warning(f"prompt_generation Run Exception @@@@@@:{e}")
diff --git a/app/service/prompt_generation/chatgpt_for_translation.py b/app/service/prompt_generation/chatgpt_for_translation.py
index fcf8ec5..193bcfc 100644
--- a/app/service/prompt_generation/chatgpt_for_translation.py
+++ b/app/service/prompt_generation/chatgpt_for_translation.py
@@ -25,27 +25,38 @@ def translate_to_en(text):
     output the input text exactly as it is without any modifications or additions.
     If there are grammatical errors, correct them and then output the sentence."""
     )
+
+    prefix = (
+        """
+        Translate everything within the brackets [] into English.
+        Never translate or modify any English input.
+        The input must be fully translated into coherent English sentences.
+        Never present the translation results in the format
+        "The translation of \"Material suave\" into English would be \"Smooth material.\"". Instead, directly output "Smooth material".
+        """
+    )
     messages = [
-        {
-            "content": template,  # 系统message
-            "role": "system"
-        },
+        # {
+        #     Translate the entire text and ensure the output is a complete and coherent sentence in English.
+        #     "content": template,  # 系统message
+        #     "role": "system"
+        # },
         {
             # "content": input('请输入:'),   # 用户message
-            "content": text,  # 用户message
+            "content": prefix + text,  # 用户message
             "role": "user"
         }
     ]
     first_response = get_response(messages)
     assistant_output = first_response.output.choices[0].message
-    print("translate result : {}".format(assistant_output))
+    print("input : {}, translate result : {}".format(text, assistant_output.content))
     return assistant_output.content
 
 
 @retry(exceptions=RequestException, tries=3, delay=1)
 def get_response(messages):
     response = Generation.call(
-        model='qwen-max',
+        model='qwen-turbo',
         api_key= QWEN_API_KEY,
         messages=messages,
         # seed=random.randint(1, 10000),  # 设置随机数种子seed,如果没有设置,则随机数种子默认为1234