调用llama3.2-vision,自动识别图片,输出prompt
This commit is contained in:
@@ -9,6 +9,7 @@ from retry import retry
|
||||
|
||||
from app.core.config import QWEN_API_KEY
|
||||
from app.service.chat_robot.script.service.CallQWen import get_language
|
||||
from app.service.prompt_generation.util import minio_util
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -143,6 +144,38 @@ def get_translation_from_llama3(text):
|
||||
# response = requests.post(url, data=json.dumps(payload), headers=headers)
|
||||
|
||||
|
||||
def get_prompt_from_image(image_path, text,
                          url="http://10.1.1.243:11434/api/generate"):
    """Ask the llama3.2-vision model (via Ollama) to describe an image.

    Fetches the image referenced by ``image_path.img`` from MinIO as a
    base64 string, posts it together with ``text`` as the prompt to the
    Ollama ``/api/generate`` endpoint, and returns the model's reply.

    Args:
        image_path: object whose ``img`` attribute holds the MinIO URL of
            the image to analyse (presumably a request/DTO object —
            confirm against callers).
        text: prompt text forwarded verbatim to the model.
        url: Ollama generate endpoint; defaults to the original
            hard-coded server so existing callers are unaffected.

    Returns:
        The model's ``response`` string on HTTP 200, otherwise ``None``.
    """
    start_time = time.time()

    image_base64 = minio_util.minio_url_to_base64(image_path.img)

    # Non-streaming single-shot request to the vision model.
    payload = {
        "model": "llama3.2-vision",
        "images": [image_base64],
        "prompt": f"{text}",
        "stream": False,
    }
    headers = {'Content-Type': 'application/json'}
    # Explicit timeout: vision inference can be slow, but without one a
    # hung server would block this call forever.
    response = requests.post(url, data=json.dumps(payload),
                             headers=headers, timeout=300)

    if response.status_code == 200:
        resp = json.loads(response.content).get("response")
        logger.info(f"sketch re-generate server runtime is {time.time() - start_time} \n, response is {resp}")
        return resp
    else:
        # Log failures at error level (the original only print-ed them);
        # keep the None return so existing callers see no contract change.
        logger.error(
            f"sketch re-generate server runtime is {time.time() - start_time} , "
            f"status code is {response.status_code}, response is {response.content}"
        )
        return None
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function"""
|
||||
text = get_translation_from_llama3("[火焰]")
|
||||
|
||||
Reference in New Issue
Block a user