diff --git a/app/core/config.py b/app/core/config.py index 584c873..59c9820 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -128,6 +128,8 @@ OLLAMA_URL = f"http://{settings.A6000_SERVICE_HOST}:11434/api/embeddings" # Design DESIGN_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10000' DESIGN_MODEL_NAME = 'seg_knet' +# Seg Product +SEG_PRODUCT_MODEL_URL = f'{settings.B_4_X_4090_SERVICE_HOST}:30000' # Generate Image GI_MODEL_URL = f'{settings.A6000_SERVICE_HOST}:10061' GI_MODEL_NAME = 'flux' diff --git a/app/service/brand_dna/service.py b/app/service/brand_dna/service.py index 148e1e9..350f395 100644 --- a/app/service/brand_dna/service.py +++ b/app/service/brand_dna/service.py @@ -9,7 +9,7 @@ import torch.nn.functional as F import tritonclient.http as httpclient from minio import Minio -from app.core.config import DESIGN_MODEL_URL +from app.core.config import DESIGN_MODEL_URL, SEG_PRODUCT_MODEL_URL from app.core.config import settings from app.schemas.brand_dna import BrandDnaModel from app.service.attribute.config import const @@ -29,7 +29,7 @@ class BrandDna: self.attr_type = pd.read_csv(settings.CATEGORY_PATH) # self.attr_type = pd.read_csv(r"E:\workspace\trinity_client_aida\app\service\attribute\config\descriptor\category\category_dis.csv") self.att_client = httpclient.InferenceServerClient(url=DESIGN_MODEL_URL) - self.seg_client = httpclient.InferenceServerClient(url='10.1.1.243:30000') + self.seg_client = httpclient.InferenceServerClient(url=SEG_PRODUCT_MODEL_URL) self.const = const # self.const = local_debug_const diff --git a/app/service/clothing_seg/service.py b/app/service/clothing_seg/service.py index e3cf83c..66f911c 100644 --- a/app/service/clothing_seg/service.py +++ b/app/service/clothing_seg/service.py @@ -23,7 +23,7 @@ class ClothingSeg: def __init__(self, request_data): self.image_data = request_data.image_data self.user_id = request_data.user_id - self.triton_client = grpcclient.InferenceServerClient(url="10.1.1.243:10071") + 
self.triton_client = grpcclient.InferenceServerClient(url=f"{settings.B_4_X_4090_SERVICE_HOST}:10071") @RunTime def get_result(self): @@ -139,7 +139,7 @@ def get_bounding_box(mask): if __name__ == "__main__": test_data = ClothingSegModel( - user_id=89, + user_id="89", image_data=[ # { # "image_url": "test/clothing_seg/dress.jpg", diff --git a/app/service/prompt_generation/chatgpt_for_translation.py b/app/service/prompt_generation/chatgpt_for_translation.py index 79d17b6..2214241 100644 --- a/app/service/prompt_generation/chatgpt_for_translation.py +++ b/app/service/prompt_generation/chatgpt_for_translation.py @@ -148,7 +148,7 @@ def get_translation_from_llama3(text): def get_prompt_from_image(image_path, text): start_time = time.time() # url = "http://localhost:11434/api/generate" - url = "http://10.1.1.243:11434/api/generate" + url = f"http://{settings.B_4_X_4090_SERVICE_HOST}:11434/api/generate" image_base64 = minio_util.minio_url_to_base64(image_path.img) # image_base64 = minio_url_to_base64(image_path)