2024-12-02 18:20:45 +08:00
|
|
|
|
import json
|
2024-12-02 20:31:46 +08:00
|
|
|
|
import logging
|
|
|
|
|
|
import time
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
2024-12-02 18:20:45 +08:00
|
|
|
|
import requests
|
2024-07-08 18:50:01 +08:00
|
|
|
|
from dashscope import Generation
|
2024-07-22 15:54:11 +08:00
|
|
|
|
from requests import RequestException
|
|
|
|
|
|
from retry import retry
|
|
|
|
|
|
|
|
|
|
|
|
from app.core.config import QWEN_API_KEY
|
2025-01-14 09:40:15 +08:00
|
|
|
|
from app.service.chat_robot.script.service.CallQWen import get_language
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
2024-12-02 20:31:46 +08:00
|
|
|
|
# Module-level logger for this translation service.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
|
|
|
|
|
# os.environ["http_proxy"] = "http://127.0.0.1:7890"
|
|
|
|
|
|
# os.environ["https_proxy"] = "http://127.0.0.1:7890"
|
|
|
|
|
|
|
|
|
|
|
|
|
2024-07-08 18:50:01 +08:00
|
|
|
|
# llm = ChatOpenAI(model_name=OPENAI_MODEL,
|
|
|
|
|
|
# openai_api_key=OPENAI_API_KEY,
|
|
|
|
|
|
# temperature=0)
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
2024-12-02 20:13:33 +08:00
|
|
|
|
# prefix_for_llama = (
|
|
|
|
|
|
# """
|
|
|
|
|
|
# Translate everything within the brackets [] into English.
|
|
|
|
|
|
# Never translate or modify any English input.
|
|
|
|
|
|
# The input must be fully translated into coherent English sentences.
|
|
|
|
|
|
# Please only output the translated result.\n
|
|
|
|
|
|
# """
|
|
|
|
|
|
# )
|
2024-12-02 18:20:45 +08:00
|
|
|
|
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
|
|
|
|
|
def translate_to_en(text):
    """Translate *text* into English via the qwen-turbo chat model.

    Sends the raw text as a single user message and returns the model's
    reply content.  NOTE(review): no system prompt is sent here — the
    translation instruction presumably lives in the model/service config;
    confirm before relying on this for arbitrary input.

    :param text: source text in any language
    :return: the content string of the model's first reply message
    """
    messages = [
        {
            "content": text,  # user message
            "role": "user"
        }
    ]
    first_response = get_response(messages)
    assistant_output = first_response.output.choices[0].message
    # Use the module logger (lazy %-formatting) instead of a debug print.
    logger.info("input : %s, translate result : %s", text, assistant_output.content)
    return assistant_output.content
|
|
|
|
|
|
|
2024-07-08 18:50:01 +08:00
|
|
|
|
|
@retry(exceptions=RequestException, tries=3, delay=1)
def get_response(messages):
    """Call the qwen-turbo model with *messages* and return the raw response.

    Retries up to 3 times with a 1-second delay on network errors
    (``RequestException``).

    :param messages: chat messages, each ``{"role": ..., "content": ...}``
    :return: the dashscope ``Generation`` response object (message format)
    """
    response = Generation.call(
        model='qwen-turbo',
        api_key=QWEN_API_KEY,
        messages=messages,
        result_format='message',  # return choices as chat messages
        enable_search=True  # fix: was the string 'True'; the flag is a boolean
    )
    return response
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
2024-12-02 18:20:45 +08:00
|
|
|
|
|
|
|
|
|
|
def get_translation_from_llama3(text):
    """Translate *text* to English using the local llama3 "translator" model.

    POSTs the text, wrapped in brackets as the custom model expects, to an
    Ollama ``/api/generate`` endpoint and returns the ``"response"`` field
    of the JSON reply.

    :param text: source text in any language
    :return: the translated string on HTTP 200, otherwise ``None``
    """
    start_time = time.time()
    url = "http://10.1.1.240:11434/api/generate"

    # "translator" is a custom model (built on llama3) that translates
    # everything inside brackets [] into English.
    payload = {
        "model": "translator",
        "prompt": f"[{text}]",
        "stream": False
    }
    headers = {'Content-Type': 'application/json'}
    # timeout added so a hung translation server cannot block the caller forever
    response = requests.post(url, data=json.dumps(payload), headers=headers, timeout=30)

    if response.status_code == 200:
        resp = json.loads(response.content).get("response")
        logger.info(f"translation server runtime is {time.time() - start_time} , response is {resp}")
        print("input : {}, translate result : {}".format(text, resp))
        return resp
    else:
        logger.info(f"translation server runtime is {time.time() - start_time} , response is {response.content}")
        print(f"Request failed with status code {response.status_code}")
        print(response.text)
        return None  # explicit: callers previously received an implicit None
|
|
|
|
|
|
|
|
|
|
|
|
|
2025-01-14 09:40:15 +08:00
|
|
|
|
# Create a translation model in llama3 (commented-out helper, kept for reference)
|
|
|
|
|
|
# def create_model_with_llama(text):
|
|
|
|
|
|
# url = "http://localhost:11434/api/create"
|
|
|
|
|
|
# # url = "http://10.1.1.240:1143/api/generate"
|
|
|
|
|
|
#
|
|
|
|
|
|
# # prompt = f"System: {prefix_for_llama}\nUser:[{text}]"
|
|
|
|
|
|
#
|
|
|
|
|
|
# # 创建翻译器的配置文件
|
|
|
|
|
|
# payload = {
|
|
|
|
|
|
# "model": "translator",
|
|
|
|
|
|
# "modelfile": "FROM llama3\nSYSTEM Translate everything within the brackets [] into English."
|
|
|
|
|
|
# "Never translate or modify any English input."
|
|
|
|
|
|
# "The input must be fully translated into coherent English sentences."
|
|
|
|
|
|
# }
|
|
|
|
|
|
#
|
|
|
|
|
|
# # 将负载转换为 JSON 格式
|
|
|
|
|
|
# headers = {'Content-Type': 'application/json'}
|
|
|
|
|
|
# response = requests.post(url, data=json.dumps(payload), headers=headers)
|
|
|
|
|
|
|
|
|
|
|
|
|
2024-05-29 11:12:59 +08:00
|
|
|
|
def main():
    """Smoke-test entry point: translate a sample string and print the result."""
    result = get_translation_from_llama3("[火焰]")
    print(result)
|
2024-05-29 11:12:59 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    main()
|