openai 替换为 通义千问

This commit is contained in:
2024-07-08 18:50:01 +08:00
parent d772adcd7a
commit 8ad3e8ac0f
8 changed files with 412 additions and 89 deletions

View File

@@ -1,8 +1,11 @@
import logging
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from dashscope import Generation
# from langchain.chains import LLMChain
from langchain_community.chat_models import QianfanChatEndpoint, ChatTongyi
# from langchain.chat_models import ChatOpenAI
from langchain_core.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import RunnableSequence
from app.core.config import OPENAI_MODEL, OPENAI_API_KEY
@@ -10,9 +13,9 @@ from app.core.config import OPENAI_MODEL, OPENAI_API_KEY
# os.environ["https_proxy"] = "http://127.0.0.1:7890"
# NOTE(review): leftover binding from the OpenAI-based implementation; this
# commit replaces it with direct DashScope calls (see get_response below).
# Presumably dead after the migration — confirm nothing still references `llm`.
llm = ChatOpenAI(model_name=OPENAI_MODEL,
openai_api_key=OPENAI_API_KEY,
temperature=0)
# llm = ChatOpenAI(model_name=OPENAI_MODEL,
# openai_api_key=OPENAI_API_KEY,
# temperature=0)
# NOTE(review): this span is a diff collage — the old LangChain prompt-template
# path (SystemMessagePromptTemplate / HumanMessagePromptTemplate) and the new
# DashScope path (messages list + get_response) are interleaved here, and the
# opening of the `template = (` string literal is elided by the hunk. The new
# behavior appears to be: build a system+user message list, call get_response,
# and return the assistant message content — TODO confirm against the full file.
def translate_to_en(text):
@@ -24,48 +27,34 @@ def translate_to_en(text):
output the input text exactly as it is without any modifications or additions.
If there are grammatical errors, correct them and then output the sentence."""
)
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
messages = [
{
"content": template, # system message
"role": "system"
},
{
# "content": input('请输入:'), # user message (old interactive input)
"content": text, # user message
"role": "user"
}
]
first_response = get_response(messages)
assistant_output = first_response.output.choices[0].message
print("translate result : {}".format(assistant_output))
return assistant_output.content
# The text to translate is supplied via the Human role
human_template = "User input : {text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(input_variables=["text"], template=human_template)
# Build the ChatPromptTemplate from the System and Human role prompt templates
chat_prompt_template = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
def get_response(messages):
    """Call the DashScope Generation API (model ``qwen-max``) with a chat history.

    Parameters
    ----------
    messages : list[dict]
        Chat turns as ``{"role": "system"|"user"|..., "content": str}`` dicts,
        as built by ``translate_to_en``.

    Returns
    -------
    The raw DashScope response object; callers read
    ``response.output.choices[0].message`` (``result_format='message'``).
    """
    import os  # local import so the file-level import block stays untouched

    # SECURITY: the API key used to be hard-coded here and was committed to the
    # repository. Prefer the DASHSCOPE_API_KEY environment variable; the old
    # literal is kept only as a backward-compatible fallback — rotate that key.
    api_key = os.environ.get('DASHSCOPE_API_KEY',
                             'sk-7658298c6b99443c98184a5e634fe6ab')
    response = Generation.call(
        model='qwen-max',
        api_key=api_key,
        messages=messages,
        # seed=random.randint(1, 10000),  # default seed is 1234 if unset
        result_format='message',  # return structured message objects
        enable_search=True,       # was the string 'True'; the SDK expects a bool
    )
    return response
def main():
"""Main function"""