AiDA_Python/app/service/prompt_generation/chatgpt_for_translation.py

import json
import logging
import time

import requests
from dashscope import Generation
from requests import RequestException
from retry import retry

from app.core.config import QWEN_API_KEY
from app.service.chat_robot.script.service.CallQWen import get_language
from app.service.prompt_generation.util import minio_util

logger = logging.getLogger(__name__)

# os.environ["http_proxy"] = "http://127.0.0.1:7890"
# os.environ["https_proxy"] = "http://127.0.0.1:7890"

# llm = ChatOpenAI(model_name=OPENAI_MODEL,
#                  openai_api_key=OPENAI_API_KEY,
#                  temperature=0)

# prefix_for_llama = (
#     """
#     Translate everything within the brackets [] into English.
#     Never translate or modify any English input.
#     The input must be fully translated into coherent English sentences.
#     Please only output the translated result.\n
#     """
# )


def translate_to_en(text):
    # template = (
    #     """You are a translation expert, proficient in various languages,
    #     who can translate various languages into English.
    #     Please translate to grammatically correct English regardless of the input language.
    #     If the input is already in English, or consists of letters or numbers such as "cat", "abc", or "1",
    #     output the input text exactly as it is without any modifications or additions.
    #     If there are grammatical errors, correct them and then output the sentence."""
    # )
    #
    # prefix = (
    #     """
    #     Translate everything within the brackets [] into English.
    #     Never translate or modify any English input.
    #     The input must be fully translated into coherent English sentences.
    #     Never present the translation results in the format
    #     "The translation of \"Material suave\" into English would be \"Smooth material.\"". Instead, directly output "Smooth material".
    #     """
    # )
    messages = [
        # {
        #     # Translate the entire text and ensure the output is a complete and coherent sentence in English.
        #     "content": template,  # system message
        #     "role": "system"
        # },
        {
            # "content": input('Enter text: '),  # user message
            "content": text,  # user message
            "role": "user"
        }
    ]
    first_response = get_response(messages)
    assistant_output = first_response.output.choices[0].message
    print("input : {}, translate result : {}".format(text, assistant_output.content))
    return assistant_output.content

    # llama3-only path:
    # data = get_translation_from_llama3(text)
    # translation = data
    # # print("Response from llama3 : " + translation)
    # return translation

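
# Example usage (a sketch, reusing the example from the commented prompt above):
# a non-English input such as "Material suave" should come back as the bare
# translation "Smooth material" rather than a wrapped explanation; the actual
# output depends on the qwen-turbo model.
#
#     translate_to_en("Material suave")  # -> "Smooth material"
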

@retry(exceptions=RequestException, tries=3, delay=1)
def get_response(messages):
    response = Generation.call(
        model='qwen-turbo',
        api_key=QWEN_API_KEY,
        messages=messages,
        # seed=random.randint(1, 10000),  # set a random seed; defaults to 1234 if not set
        result_format='message',  # return the output in message format
        enable_search=True  # a real boolean; the original passed the string 'True', which is merely truthy
    )
    return response

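
# A defensive variant (illustrative sketch, not part of the original module):
# DashScope responses carry an HTTP-style `status_code`, so checking it before
# reading `output` avoids an AttributeError on failed calls. Falling back to
# the untranslated input is an assumption about the desired behavior.
def translate_to_en_safe(text):
    response = get_response([{"content": text, "role": "user"}])
    if response.status_code != 200:
        # Log the DashScope error code and message, then return the input unchanged
        logger.warning("qwen-turbo call failed: code=%s message=%s",
                       response.code, response.message)
        return text
    return response.output.choices[0].message.content
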

def get_translation_from_llama3(text):
    start_time = time.time()
    # url = "http://10.1.1.240:11434/api/generate"
    url = "http://10.1.1.243:11434/api/generate"
    # prompt = f"System: {prefix_for_llama}\nUser:[{text}]"

    # First detect the language of the user input; English input is returned as-is
    # language = get_language(text)
    # if 'English' in language:
    #     return text

    # Build the request payload; the translator is a custom fine-tuned translation model
    payload = {
        "model": "zcr_gemma3_translator:4b",
        "prompt": f"[{text}]",
        "stream": False
    }
    # Send the payload as JSON
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    # Handle the response
    if response.status_code == 200:
        # print("Response from server:")
        # print(response.json())
        resp = json.loads(response.content).get("response")
        logger.info(f"translation server runtime is {time.time() - start_time}, response is {resp}")
        print("input : {}, translate result : {}".format(text, resp))
        return resp
    else:
        logger.info(f"translation server runtime is {time.time() - start_time}, response is {response.content}")
        print(f"Request failed with status code {response.status_code}")
        print(response.text)

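
# A hardened variant (illustrative sketch): requests.post accepts a `timeout`
# and can serialize the payload via `json=` directly, so a hung Ollama server
# fails fast instead of blocking the caller. The 60-second default and the
# function name are assumptions, not part of the original module.
def get_translation_from_llama3_with_timeout(text, timeout=60):
    try:
        response = requests.post(
            "http://10.1.1.243:11434/api/generate",
            json={"model": "zcr_gemma3_translator:4b",
                  "prompt": f"[{text}]",
                  "stream": False},
            timeout=timeout,
        )
        response.raise_for_status()  # treat non-2xx responses as errors
        return response.json().get("response")
    except RequestException as exc:
        logger.warning("translation request failed: %s", exc)
        return None
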

# Create a translation model in llama3 (via the Ollama /api/create endpoint)
# def create_model_with_llama(text):
#     url = "http://localhost:11434/api/create"
#     # url = "http://10.1.1.240:1143/api/generate"
#
#     # prompt = f"System: {prefix_for_llama}\nUser:[{text}]"
#
#     # Build the translator's model configuration
#     payload = {
#         "model": "translator",
#         "modelfile": "FROM llama3\nSYSTEM Translate everything within the brackets [] into English."
#                      "Never translate or modify any English input."
#                      "The input must be fully translated into coherent English sentences."
#     }
#
#     # Send the payload as JSON
#     headers = {'Content-Type': 'application/json'}
#     response = requests.post(url, data=json.dumps(payload), headers=headers)

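
# The commented-out /api/create request above corresponds to this Modelfile
# (a sketch; the same model can be built on the Ollama host from the CLI with
# `ollama create translator -f Modelfile`):
#
#     FROM llama3
#     SYSTEM """Translate everything within the brackets [] into English.
#     Never translate or modify any English input.
#     The input must be fully translated into coherent English sentences."""
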

def get_prompt_from_image(image_path, text):
    start_time = time.time()
    # url = "http://localhost:11434/api/generate"
    url = "http://10.1.1.243:11434/api/generate"
    image_base64 = minio_util.minio_url_to_base64(image_path.img)
    # image_base64 = minio_url_to_base64(image_path)

    # Build the request payload for the llama3.2-vision model
    payload = {
        "model": "llama3.2-vision",
        "images": [image_base64],
        "prompt": f"{text}",
        "stream": False
    }
    # Send the payload as JSON
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    # Handle the response
    if response.status_code == 200:
        # print("Response from server:")
        # print(response.json())
        resp = json.loads(response.content).get("response")
        logger.info(f"sketch re-generate server runtime is {time.time() - start_time}, response is {resp}")
        # print("input : {}, sketch re-generate result : {}".format(text, resp))
        return resp
    else:
        logger.info(f"sketch re-generate server runtime is {time.time() - start_time}, response is {response.content}")
        print(f"Request failed with status code {response.status_code}")
        print(response.text)

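
# For local testing (a sketch): the Ollama vision endpoint expects base64-encoded
# image bytes, so a local file can stand in for the MinIO URL used above.
# `image_file_to_base64` is a hypothetical helper, not part of minio_util.
def image_file_to_base64(path):
    import base64
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")
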

def main():
    """Main function"""
    text = get_translation_from_llama3("[123]")
    print(text)


if __name__ == "__main__":
    main()