"""Translation helpers: Qwen (dashscope) chat API and a local llama3/Ollama endpoint."""
import json
|
||
|
||
import requests
|
||
from dashscope import Generation
|
||
from requests import RequestException
|
||
from retry import retry
|
||
|
||
from app.core.config import QWEN_API_KEY
|
||
|
||
|
||
# os.environ["http_proxy"] = "http://127.0.0.1:7890"
|
||
# os.environ["https_proxy"] = "http://127.0.0.1:7890"
|
||
|
||
|
||
# llm = ChatOpenAI(model_name=OPENAI_MODEL,
|
||
# openai_api_key=OPENAI_API_KEY,
|
||
# temperature=0)
|
||
|
||
# prefix_for_llama = (
|
||
# """
|
||
# Translate everything within the brackets [] into English.
|
||
# Never translate or modify any English input.
|
||
# The input must be fully translated into coherent English sentences.
|
||
# Please only output the translated result.\n
|
||
# """
|
||
# )
|
||
|
||
|
||
def translate_to_en(text):
    """Translate *text* into English using the Qwen chat model.

    Builds a single-turn chat request and forwards it to ``get_response``.

    Args:
        text: Input string in any language.

    Returns:
        The model's translated (English) text.
    """
    messages = [
        {
            "content": text,  # user message
            "role": "user",
        }
    ]
    first_response = get_response(messages)
    assistant_output = first_response.output.choices[0].message
    print("input : {}, translate result : {}".format(text, assistant_output.content))
    return assistant_output.content
|
||
|
||
|
||
@retry(exceptions=RequestException, tries=3, delay=1)
def get_response(messages):
    """Call the Qwen (dashscope) chat API and return the raw response object.

    Retries up to 3 times with a 1-second delay on network errors
    (``RequestException``).

    Args:
        messages: Chat messages in dashscope message format.

    Returns:
        The response object from ``Generation.call``.
    """
    response = Generation.call(
        model='qwen-turbo',
        api_key=QWEN_API_KEY,
        messages=messages,
        # seed=random.randint(1, 10000),  # random seed; defaults to 1234 if unset
        result_format='message',  # return output in "message" format
        enable_search=True,  # fixed: was the string 'True'; the SDK expects a boolean
    )
    return response
|
||
|
||
|
||
def get_translation_from_llama3(text):
    """Translate *text* to English via a local llama3 "translator" Ollama endpoint.

    Args:
        text: Input string; wrapped in brackets ``[...]`` per the model's
            prompt convention.

    Returns:
        The translated string on success, or ``None`` if the request fails.
    """
    url = "http://10.1.1.240:11434/api/generate"
    # url = "http://10.1.1.240:1143/api/generate"

    # Build the request payload.
    payload = {
        "model": "translator",
        "prompt": f"[{text}]",
        "stream": False,  # request one complete response, not a stream
    }

    headers = {'Content-Type': 'application/json'}
    # Timeout added so a hung endpoint cannot block the caller forever;
    # requests.Timeout is a RequestException subclass, so retry-wrapped
    # callers behave consistently.
    response = requests.post(url, data=json.dumps(payload), headers=headers,
                             timeout=30)

    if response.status_code == 200:
        # response.json() handles decoding; clearer than json.loads(response.content)
        resp = response.json().get("response")
        print("input : {}, translate result : {}".format(text, resp))
        return resp

    print(f"Request failed with status code {response.status_code}")
    print(response.text)
    return None  # explicit: callers receive None on failure
|
||
|
||
|
||
def main():
    """Entry point: translate one sample bracketed string via llama3 and print it."""
    translated = get_translation_from_llama3("[火焰]")
    print(translated)
|
||
|
||
|
||
# Run the demo translation only when executed directly as a script.
if __name__ == "__main__":
    main()
|