diff --git a/app/schemas/design.py b/app/schemas/design.py
index 6f0a633..9a3d80d 100644
--- a/app/schemas/design.py
+++ b/app/schemas/design.py
@@ -7,9 +7,9 @@ class SAMRequestModel(BaseModel):
     user_id: int = Field(..., description="用户id, 必填字段")
     image_path: str = Field(..., description="图片路径,必填字段")
     type: str = Field(..., description="推理类型,必填字段")
-    points: Optional[List[List[float]]] = None
-    labels: Optional[List[int]] = None
-    box: Optional[List[int]] = None
+    points: List[List[float]] | None = None
+    labels: List[int] | None = None
+    box: List[int] | None = None
 
 
 class DesignModel(BaseModel):
diff --git a/app/service/prompt_generation/chatgpt_for_translation.py b/app/service/prompt_generation/chatgpt_for_translation.py
index 2214241..1ef3111 100644
--- a/app/service/prompt_generation/chatgpt_for_translation.py
+++ b/app/service/prompt_generation/chatgpt_for_translation.py
@@ -90,7 +90,7 @@ def get_response(messages):
 def get_translation_from_llama3(text):
     start_time = time.time()
 
-    url = f"http://{settings.A6000_SERVICE_HOST}:11434/api/generate"
+    url = f"http://{settings.A6000_SERVICE_HOST}:12434/api/generate"
     # url = "http://10.1.1.240:1143/api/generate"
 
     # prompt = f"System: {prefix_for_llama}\nUser:[{text}]"
@@ -103,8 +103,8 @@ def get_translation_from_llama3(text):
 
     # 创建请求的负载 translator是自定义的翻译模型
     payload = {
-        "model": "translator",
-        "prompt": f"[{text}]",
+        "model": "AiDA-translator:latest",
+        "prompt": text,
         "stream": False
     }
     # 将负载转换为 JSON 格式
@@ -180,7 +180,7 @@ def get_prompt_from_image(image_path, text):
 
 def main():
     """Main function"""
-    text = get_translation_from_llama3("[火焰]")
+    text = get_translation_from_llama3("火焰")
     print(text)
 