2025-12-30 16:49:08 +08:00
|
|
|
|
from langchain_classic.output_parsers import ResponseSchema, StructuredOutputParser
|
2025-05-15 16:40:58 +08:00
|
|
|
|
from langchain_community.chat_models import ChatTongyi
|
|
|
|
|
|
from langchain_core.prompts import PromptTemplate
|
|
|
|
|
|
|
|
|
|
|
|
from app.schemas.project_info_extraction import ProjectInfoExtractionModel
|
|
|
|
|
|
|
|
|
|
|
|
# Closed vocabularies the LLM is told to choose from when extracting project
# attributes; they are interpolated into the prompt template below.

# Supported fashion design styles.
style = [
    'NEW_CHINESE', 'COUNTRY_STYLE', 'FUTURISM', 'MINIMALISM', 'LOLITA',
    'Y2K', 'BUSINESS', 'MERLAD', 'OUTDOOR_FUNCTIONAL', 'ROCK', 'DOPAMINE',
    'GOTHIC', 'POST_APOCALYPTIC', 'ROMANTIC', 'WABI_SABI',
]

# Garment positions available for a single-item design.
position = [
    'Overall', 'Tops', 'Bottoms', 'Outwear',
    'Blouse', 'Dress', 'Trousers', 'Skirt',
]

# Target audience gender options.
gender = ['Female', 'Male']

# Target audience age groups.
age_group = ['Adult', 'Child']

# Project workflow type: a whole series or a single garment.
process = ['SERIES_DESIGN', 'SINGLE_DESIGN']
|
class ProjectInfoExtraction:
    """Extract structured project info from a free-form user request via a
    Tongyi (Qwen) chat model.

    The LLM is prompted to return a JSON object with the fields
    ``project_name``, ``process``, ``ageGroup``, ``gender``, ``position`` and
    ``style``, each constrained to the module-level vocabulary lists.
    """

    def __init__(self, request_data):
        """Prepare the model, output parser and formatted prompt.

        Args:
            request_data: object with ``prompt`` (str), ``image_list`` (list)
                and ``file_list`` (list) attributes
                (``ProjectInfoExtractionModel``).
        """
        # NOTE(security): the API key was previously hard-coded twice below.
        # Prefer the environment; the literal fallback keeps existing
        # deployments working but the leaked key MUST be rotated and removed.
        api_key = os.getenv("DASHSCOPE_API_KEY", "sk-7658298c6b99443c98184a5e634fe6ab")

        # Use the vision-language model when the request carries images or
        # files; otherwise a cheaper text-only model is sufficient.
        # NOTE(review): images/files are not currently attached to the prompt
        # itself — only the text question is sent; confirm intended behavior.
        if len(request_data.image_list) or len(request_data.file_list):
            self.model = ChatTongyi(model="qwen-vl-plus", api_key=api_key)
        else:
            self.model = ChatTongyi(model="qwen2.5-14b-instruct", api_key=api_key)

        # Declare the JSON fields the model must return; the parser derives
        # format instructions from these schemas.
        self.response_schemas = [
            ResponseSchema(name="project_name", description="项目的名称."),
            ResponseSchema(name="process", description="项目的类型,单品还是系列."),
            ResponseSchema(name="ageGroup", description="项目设计服装的受众对象."),
            ResponseSchema(name="gender", description="项目设计服装的受众性别."),
            ResponseSchema(name="position", description="项目单品设计的部位."),
            ResponseSchema(name="style", description="项目的设计风格.")
        ]
        self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas)
        self.format_instructions = self.output_parser.get_format_instructions()

        # The allowed-value lists are baked into the template via f-strings;
        # only the question and format instructions are template variables.
        self.prompt = PromptTemplate(
            template="你是一个时装品牌的设计师助理。根据用户输入提取出"
                     "[project_name] : 项目的名称,"
                     f"[process] : 项目的类型,从{process}选择."
                     f"[ageGroup] : 服装的受众,从{age_group}选择."
                     f"[gender] : 服装的适用性别,从{gender}选择"
                     f"[position] : single_design的部位,如果[process]是SINGLE_DESIGN,从{position}中选择,如果[process]是SERIES_DESIGN,这项为空"
                     f"[style] : 设计的风格,从{style}中选择"
                     ".\n{format_instructions}\n{question}",
            input_variables=["question"],
            partial_variables={"format_instructions": self.format_instructions}
        )
        # Fully formatted prompt value ready to send to the model.
        self._input = self.prompt.format_prompt(question=request_data.prompt)

        # Parsed extraction result; populated by llm_extraction_project_info().
        self.result_data = {}

    def get_result(self):
        """Run the extraction and return the parsed project-info dict."""
        self.llm_extraction_project_info()
        return self.result_data

    def llm_extraction_project_info(self):
        """Invoke the LLM on the prepared prompt and parse its JSON reply.

        BUG FIX: the original assigned ``self.model`` itself to ``output``
        without ever invoking it, so ``output.content`` would raise — the
        model must be called with the formatted prompt first.
        """
        output = self.model.invoke(self._input)
        self.result_data = self.output_parser.parse(output.content)
|
|
if __name__ == '__main__':
    # Ad-hoc smoke test: build a sample request carrying one image URL and
    # run the extraction end to end, printing the parsed project info.
    sample_request = ProjectInfoExtractionModel(
        prompt="性别为儿童",
        image_list=[
            'https://www.minio-api.aida.com.hk/test/019aaeed-3227-11f0-a194-0826ae3ad6b3.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=vXKFLSJkYeEq2DrSZvkB%2F20250613%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250613T020236Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=a513b706c24134071a489c34f0fa2c0f510e871b8589dc0c08a0f26ea28ee2ff'
        ],
        file_list=[]
    )
    extractor = ProjectInfoExtraction(sample_request)
    print(extractor.get_result())