# NOTE(review): removed web-page extraction residue ("code pull finished, page will refresh") that was not part of the source and broke the file.
import argparse
import base64
import os
import time
from io import BytesIO

import uvicorn
from fastapi import FastAPI
from PIL import Image

from models.image_text_transformation import ImageTextTransformation
app = FastAPI()
processor = None
def ssa(img_path):
"""
:param img_path:
:return: {"image":base64编码,"caption":[],"bbox":[]}
"""
region_semantic_result = processor.semantic_segment_anything(img_path)
return region_semantic_result
@app.post("/get-ssa-result")
def ssaAPI(data: dict):
"""
:param data:
:return: {"result": {"image":base64编码, "caption":[]}}
"""
image = data.get("image", None)
print('image received')
if image is not None:
img_binary = base64.b64decode(data.get("image").split(',')[-1])
img = Image.open(BytesIO(img_binary)).convert("RGB")
img_path = 'img_from_http/temp_{}.jpg'.format(str(time.time()))
img.save(img_path)
result = ssa(img_path)
return {"result": result}
if __name__ == '__main__':
print("*" * 50)
print("SSA Started!")
print("*" * 50)
parser = argparse.ArgumentParser()
parser.add_argument('--image_src', default='test/img6.jpg')
parser.add_argument('--out_image_name', default='test/result/_.jpg')
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt4'], default='gpt-3.5-turbo')
parser.add_argument('--image_caption', action='store_true', dest='image_caption', default=True,
help='Set this flag to True if you want to use BLIP2 Image Caption')
parser.add_argument('--dense_caption', action='store_true', dest='dense_caption', default=True,
help='Set this flag to True if you want to use Dense Caption')
parser.add_argument('--semantic_segment', action='store_true', dest='semantic_segment', default=True,
help='Set this flag to True if you want to use semantic segmentation')
parser.add_argument('--sam_arch', choices=['vit_b', 'vit_l', 'vit_h'], dest='sam_arch', default='vit_h',
help='vit_b is the default model (fast but not accurate), vit_l and vit_h are larger models')
parser.add_argument('--captioner_base_model', choices=['blip', 'blip2'], dest='captioner_base_model',
default='blip2', help='blip2 requires 15G GPU memory, blip requires 6G GPU memory')
parser.add_argument('--region_classify_model', choices=['ssa', 'edit_anything'], dest='region_classify_model',
default='edit_anything',
help='Select the region classification model: edit anything is ten times faster than ssa, but less accurate.')
parser.add_argument('--image_caption_device', choices=['cuda', 'cpu'], default='cuda',
help='Select the device: cuda or cpu, gpu memory larger than 14G is recommended')
parser.add_argument('--dense_caption_device', choices=['cuda', 'cpu'], default='cuda',
help='Select the device: cuda or cpu, < 6G GPU is not recommended>')
parser.add_argument('--semantic_segment_device', choices=['cuda', 'cpu'], default='cuda',
help='Select the device: cuda or cpu, gpu memory larger than 14G is recommended. Make sue this model and image_caption model on same device.')
parser.add_argument('--contolnet_device', choices=['cuda', 'cpu'], default='cuda',
help='Select the device: cuda or cpu, <6G GPU is not recommended>')
args = parser.parse_args()
processor = ImageTextTransformation(args)
# uvicorn.run('test_ssa_API:app', host="172.18.29.60", port=8012, workers=4)
uvicorn.run(app, host="172.18.29.60", port=8012, workers=1)
# NOTE(review): removed Gitee content-moderation page boilerplate that was appended by the web extraction and is not part of the source.