import csv
import os
import threading
import time
from concurrent.futures import as_completed, ThreadPoolExecutor, wait
from io import BytesIO

import pandas as pd
import requests
from PIL import Image

from models import UnicornException

from .deal_one_image import DealOneImage, DealOneImageBeforehand
from .other.log import MyLogger
from .other.module_online_data import GetOnlineData
from .remove_bg_pixian import RemoveBgPiXian


def urlPilImage(url):
    """Download an image from a URL and return it as a PIL Image."""
    response = requests.get(url)
    return Image.open(BytesIO(response.content))


def check_path(_path):
    """Create the directory if it does not exist yet."""
    if not os.path.exists(_path):
        os.makedirs(_path, exist_ok=True)
    return True


class DealCutout:
    def __init__(self, token):
        super().__init__()
        self.lock = threading.Lock()
        self.need_cutout_images = []  # image dicts to process, filled in by the caller
        self.token = token
        self.state = 2  # 1 = in progress, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        self.output_type = 0
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def startDispose(self):
        self.get_online_data.refresh_headers()
        num = 0
        result_array = []
        save_root_path = ""
        for image_data in self.need_cutout_images:
            num += 1
            save_root_path = image_data["root_path"]
            # Pre-process the image, then run the actual cutout with the result.
            upload_pic_dict = {}
            upload_pic_dict = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).run(upload_pic_dict)
            result = DealOneImage(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).run(image_data, upload_pic_dict)
            result_array.append(result)
        return result_array, save_root_path

    def normalMode(self):
        """Normal mode."""
        self.get_online_data.refresh_headers()
        num = 0
        result_array = []
        print("self.need_cutout_images", self.need_cutout_images)
        save_root_path = ""
        for image_data in self.need_cutout_images:
            num += 1
            save_root_path = image_data["root_path"]
            result = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).get_image_cut_noraml(image_data)
            result_array.append(result)
        return result_array, save_root_path


class DealCloths:
    def __init__(self, token):
        super().__init__()
        self.lock = threading.Lock()
        self.need_cutout_images = []
        self.token = token
        self.output_type = 0
        self.state = 2  # 1 = in progress, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def startDispose(self):
        self.get_online_data.refresh_headers()
        num = 0
        result_array = []
        save_root_path = ""
        for image_data in self.need_cutout_images:
            num += 1
            save_root_path = image_data["root_path"]
            hand = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            )
            upload_pic_dict = hand.get_image_cut_cloths(image_data)
            result_array.append(upload_pic_dict)
        return result_array, save_root_path


class DealModelForm:
    """Handle mannequin (model form) cutout."""

    csvName = 'record.csv'

    def __init__(self, token, params):
        super().__init__()
        self.lock = threading.Lock()
        self.need_cutout_images = []
        self.token = token
        self.output_type = 0
        self.state = 2  # 1 = in progress, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        self.params = params
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def addData2Csv(self, data):
        name_list = ['file_name', 'file_e', 'file_path', 'file', 'root_path',
                     'need_cutout', 'image_url', 'generate_id', 'status']
        isExist = os.path.exists(self.csvName)
        with open(self.csvName, "a", newline="", encoding="utf-8") as csvfile:
            writer = csv.writer(csvfile)
            # Write the header row first if the file is new.
            if not isExist:
                writer.writerow(name_list)
            writer.writerows(data)

    def startDispose(self):
        self.get_online_data.refresh_headers()
        num = 0
        save_root_path = ""
        baseImages = []
        # Upload every source image and remember the returned URL.
        for index, image_data in enumerate(self.need_cutout_images):
            num += 1
            save_root_path = image_data["root_path"]
            file_path = image_data["file_path"]
            image_url = self.get_online_data.upload_pic(file_path)
            baseImages.append(image_url)
            self.need_cutout_images[index]["image_url"] = image_url
        data = {
            "base_image": baseImages,
            "out_width": self.params.out_width,
            "out_height": self.params.out_height,
            "size_mode": self.params.size_mode,
            "output_mode": self.params.output_mode,
        }
        result_json = self.get_online_data.model_form_segment(data)
        generate_ids = result_json.get("generate_ids")
        saveParams = []
        for idx, gen_id in enumerate(generate_ids):
            self.need_cutout_images[idx]['generate_id'] = gen_id
            item = self.need_cutout_images[idx]
            # Columns: file_name, file_e, file_path, file, root_path,
            # need_cutout, image_url, generate_id, status
            saveParams.append([
                item['file_name'],
                item['file_e'],
                item['file_path'],
                item['file'],
                item['root_path'],
                item['need_cutout'],
                item['image_url'],
                item['generate_id'],
                False,
            ])
        self.addData2Csv(saveParams)
        return self.need_cutout_images, save_root_path, generate_ids

    def search_progress(self):
        """Query cutout progress for the generate_ids recorded in the CSV."""
        try:
            csvData = pd.read_csv(self.csvName)
        except FileNotFoundError:
            # "No generation record exists; submit a mannequin cutout task first."
            raise UnicornException("不存在生成记录,请先提交抠人台抠图任务")
        print("self.params", self.params)
        search_generate_ids = self.params.generate_ids
        dataParams = {
            "generate_ids": search_generate_ids,
            "type": self.params.type,
        }
        responseData = self.get_online_data.search_progress(dataParams)
        generate_ids = csvData.loc[csvData['generate_id'].isin(search_generate_ids)]
        successCount = 0
        failCount = 0
        is_finished = False
        root_path = ""
        if len(generate_ids) > 0:
            root_path = generate_ids.iloc[0]["root_path"]
            # "已扣图" is the output directory for finished cutouts.
            save_path = f"{root_path}/已扣图"
            check_path(save_path)
        else:
            return is_finished, successCount, failCount, root_path
        for idx, generate in generate_ids.iterrows():
            for respItem in responseData:
                if generate["generate_id"] != respItem["id"]:
                    continue
                status = respItem['status']
                print("status", status)
                if status == -1:
                    # Generation failed: count it and drop the record from the CSV.
                    failCount += 1
                    csvData.drop(
                        csvData.loc[csvData['generate_id'] == generate["generate_id"]].index,
                        inplace=True,
                    )
                    break
                if status == 2:
                    # Generation finished: download the result and save it locally.
                    successCount += 1
                    result_image_url = respItem['result_image_urls'][0]
                    result_image_pil = urlPilImage(result_image_url)
                    root_path = generate["root_path"]
                    file_name = generate["file_name"]
                    # Keep transparency as PNG, otherwise save as JPEG.
                    if result_image_pil.mode == 'RGBA':
                        result_image_pil.save(f"{save_path}/{file_name}.png")
                    else:
                        result_image_pil.save(f"{save_path}/{file_name}.jpg")
                    csvData.drop(
                        csvData.loc[csvData['generate_id'] == generate["generate_id"]].index,
                        inplace=True,
                    )
                    break
        csvData.to_csv(self.csvName, index=False)
        is_finished = len(search_generate_ids) == (successCount + failCount)
        return is_finished, successCount, failCount, root_path
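

# A minimal usage sketch (illustrative only). It assumes a valid API token, that
# the caller fills `need_cutout_images` with dicts shaped like the ones read
# above, and that `params` exposes out_width/out_height/size_mode/output_mode
# plus generate_ids/type for progress polling; all values below are placeholders.
if __name__ == "__main__":
    from types import SimpleNamespace

    params = SimpleNamespace(
        out_width=800,
        out_height=800,
        size_mode=1,
        output_mode=1,
        generate_ids=[],
        type=1,
    )
    deal = DealModelForm(token="YOUR_TOKEN", params=params)
    deal.need_cutout_images = [
        {
            "file_name": "demo",
            "file_e": ".jpg",
            "file_path": "/tmp/demo.jpg",
            "file": "demo.jpg",
            "root_path": "/tmp",
            "need_cutout": True,
        },
    ]
    # Submit the cutout task; generate_ids are also written to record.csv.
    images, root_path, generate_ids = deal.startDispose()

    # Later, poll progress with the recorded ids.
    params.generate_ids = generate_ids
    finished, ok, failed, root = deal.search_progress()
    print(finished, ok, failed, root)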