"""Batch image-cutout drivers: generic cutout, clothing cutout, and
mannequin (model-form) segmentation with progress polling."""

import csv
import io
import os
import threading
import time
from concurrent.futures import as_completed, ThreadPoolExecutor, wait
from io import BytesIO

import pandas as pd
import requests
from PIL import Image

from models import UnicornException

from .deal_one_image import DealOneImage, DealOneImageBeforehand
from .other.log import MyLogger
from .other.module_online_data import GetOnlineData
from .remove_bg_pixian import RemoveBgPiXian, Picture


def urlPilImage(url):
    """Download *url* and return it as a PIL ``Image``.

    Raises the usual ``requests`` exceptions on network failure.
    """
    # A timeout prevents a dead server from hanging the caller forever.
    response = requests.get(url, timeout=60)
    return Image.open(BytesIO(response.content))


def check_path(_path):
    """Ensure directory *_path* exists; create it (with parents) if missing.

    Always returns True.
    """
    # makedirs(exist_ok=True) is race-free and handles nested paths,
    # unlike the previous exists()/mkdir() pair.
    os.makedirs(_path, exist_ok=True)
    return True


class DealCutout:
    """Drives plain cutout processing for a batch of queued images."""

    def __init__(self, token):
        super().__init__()
        self.lock = threading.Lock()
        # NOTE(review): initialised as a dict but iterated elsewhere as a
        # list of image-data dicts — callers apparently assign a list later.
        self.need_cutout_images = {}
        self.token = token
        self.state = 2  # 1 = running, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        self.output_type = 0
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def startDispose(self):
        """Process every queued image; return (results, last root path)."""
        self.get_online_data.refresh_headers()
        result_array = []
        save_root_path = ""
        for num, image_data in enumerate(self.need_cutout_images, start=1):
            save_root_path = image_data["root_path"]
            # Pre-processing step produces the upload dict consumed below.
            upload_pic_dict = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).run({})
            result = DealOneImage(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).run(image_data, upload_pic_dict)
            result_array.append(result)
        return result_array, save_root_path

    def normalMode(self):
        """Normal mode: pre-process only images flagged ``need_cutout``.

        Returns (results, last processed root path).
        """
        self.get_online_data.refresh_headers()
        num = 0
        result_array = []
        self.logger.info("need_cutout_images: %s", self.need_cutout_images)
        save_root_path = ""
        for image_data in self.need_cutout_images:
            if not image_data["need_cutout"]:
                continue
            num += 1
            save_root_path = image_data["root_path"]
            # NOTE: the "noraml" typo is part of DealOneImageBeforehand's
            # public API; do not "fix" it here.
            result = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            ).get_image_cut_noraml(image_data)
            result_array.append(result)
        return result_array, save_root_path


class DealCloths:
    """Drives clothing cutout for a batch of queued images."""

    def __init__(self, token):
        super().__init__()
        self.lock = threading.Lock()
        # NOTE(review): see DealCutout — assigned a list of dicts by callers.
        self.need_cutout_images = {}
        self.token = token
        self.output_type = 0
        self.state = 2  # 1 = running, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def startDispose(self):
        """Cut clothing out of every flagged image.

        Returns (results, last processed root path).
        """
        self.get_online_data.refresh_headers()
        num = 0
        result_array = []
        save_root_path = ""
        for image_data in self.need_cutout_images:
            if not image_data["need_cutout"]:
                continue
            num += 1
            save_root_path = image_data["root_path"]
            hand = DealOneImageBeforehand(
                image_data=image_data,
                lock=self.lock,
                windows=self,
                num=num,
                token=self.token,
            )
            result_array.append(hand.get_image_cut_cloths(image_data))
        return result_array, save_root_path


class DealModelForm:
    """Mannequin cutout: upload images, submit one segmentation job for the
    batch, and poll for finished results."""

    csvName = "record.csv"

    def __init__(self, token, params):
        super().__init__()
        self.lock = threading.Lock()
        # NOTE(review): see DealCutout — assigned a list of dicts by callers.
        self.need_cutout_images = {}
        self.token = token
        self.output_type = 0
        self.state = 2  # 1 = running, 2 = stopped
        self.get_online_data = GetOnlineData(self.token)
        self.is_upload_pic_num = 0
        self.is_deal_num = 0
        self.params = params
        # picture list
        self.upload_pic_dict = {}
        self.logger = MyLogger().logger

    def addData2Csv(self, data):
        """Append result rows to the record CSV, writing the header row
        first when the file does not exist yet.

        ``data`` is an iterable of rows matching ``name_list`` order.
        """
        name_list = [
            "file_name",
            "file_e",
            "file_path",
            "file",
            "root_path",
            "need_cutout",
            "image_url",
            "generate_id",
            "status",
        ]
        write_header = not os.path.exists(self.csvName)
        # newline="" is required by the csv module (avoids blank rows on
        # Windows); the with-block guarantees the handle is closed.
        with open(self.csvName, "a", encoding="utf-8-sig", newline="") as csvfile:
            writer = csv.writer(csvfile)
            if write_header:
                writer.writerow(name_list)
            writer.writerows(data)

    def startDispose(self):
        """Upload every flagged image and submit one segmentation job.

        Returns (need_cutout_images, save_root_path, generate_ids).
        """
        self.get_online_data.refresh_headers()
        resize = 1600  # longest-edge limit before upload
        base_images = []
        uploaded_indices = []  # original indices of the uploaded images
        root_path_list = []

        for index, image_data in enumerate(self.need_cutout_images):
            if not image_data["need_cutout"]:
                continue
            root_path_list.append(image_data["root_path"])
            file_path = image_data["file_path"]
            original_pic = Picture(file_path)
            # Scale the longest edge down to `resize`; never upscale.
            if original_pic.x > original_pic.y:
                if original_pic.x > resize:
                    original_pic.resize(resize)
            elif original_pic.y > resize:
                original_pic.resize_by_heigh(heigh=resize)
            buffer = io.BytesIO()
            # PNG preserves the alpha channel; JPEG for everything else.
            if original_pic.im.mode == "RGBA":
                original_pic.im.save(buffer, format="PNG")
            else:
                original_pic.im.save(buffer, format="JPEG")
            buffer.seek(0)
            image_url = self.get_online_data.upload_pic(file_path=None, buffer=buffer)
            base_images.append(image_url)
            uploaded_indices.append(index)
            self.need_cutout_images[index]["image_url"] = image_url

        data = {
            "base_image": base_images,
            "out_width": self.params.out_width,
            "out_height": self.params.out_height,
            "size_mode": self.params.size_mode,
            "output_mode": self.params.output_mode,
        }
        result_json = self.get_online_data.model_form_segment(data)
        generate_ids = result_json.get("generate_ids")

        save_params = []
        # generate_ids come back in upload order; map each id back to the
        # image it belongs to.  (The previous code indexed
        # need_cutout_images by the position within generate_ids, which is
        # wrong as soon as any image is skipped by the need_cutout filter.)
        for gen_id, img_index in zip(generate_ids, uploaded_indices):
            item = self.need_cutout_images[img_index]
            item["generate_id"] = gen_id
            save_params.append(
                [
                    item["file_name"],
                    item["file_e"],
                    item["file_path"],
                    item["file"],
                    item["root_path"],
                    item["need_cutout"],
                    item["image_url"],
                    item["generate_id"],
                    False,  # status: not finished yet
                ]
            )

        # Guard against an empty batch (min() raises on an empty list).
        save_root_path = min(root_path_list) if root_path_list else ""
        # self.addData2Csv(save_params)  # CSV record keeping currently disabled
        return self.need_cutout_images, save_root_path, generate_ids

    def search_progress(self):
        """Poll generation progress and save finished images to disk.

        Returns (is_finished, success_count, fail_count, save_root_path).
        Raises UnicornException when ``params.result`` is missing.
        """
        search_generate_ids = self.params.generate_ids
        dataParams = {
            "generate_ids": search_generate_ids,
            "type": self.params.type,
        }
        responseData = self.get_online_data.search_progress(dataParams)

        generate_ids = self.params.result
        successCount = 0
        failCount = 0
        is_finished = False
        if generate_ids is None:
            raise UnicornException("参数异常")
        if not generate_ids:
            return is_finished, successCount, failCount, ""

        root_path_list = [g.get("root_path") for g in generate_ids]
        save_root_path = min(root_path_list)
        save_path = f"{save_root_path}/已扣图"
        check_path(save_path)

        for generate in generate_ids:
            for respItem in responseData:
                try:
                    if generate["generate_id"] != respItem["id"]:
                        continue
                except (KeyError, TypeError):
                    # Malformed entry on either side — skip this candidate
                    # instead of aborting the whole poll.
                    continue
                status = respItem["status"]
                self.logger.info("status %s", status)
                if status == -1:  # generation failed
                    failCount += 1
                    break
                if status == 2:  # generation finished
                    successCount += 1
                    result_image_url = respItem["result_image_urls"][0]
                    result_image_pil = urlPilImage(result_image_url)
                    file_name = generate["file_name"]
                    # RGBA keeps transparency -> PNG; otherwise JPEG.
                    ext = "png" if result_image_pil.mode == "RGBA" else "jpg"
                    target = f"{save_path}/{file_name}.{ext}"
                    # Never overwrite an already-saved result.
                    if not os.path.exists(target):
                        result_image_pil.save(target)
                    break
                # Any other status (still running): keep scanning.

        is_finished = len(search_generate_ids) == (successCount + failCount)
        return is_finished, successCount, failCount, save_root_path