# preprocess.py

import os
from PIL import Image, ImageOps
import math
import tqdm

from modules import paths, shared, images, deepbooru
from modules.textual_inversion import autocrop
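

# UI entry point: loads the BLIP interrogator and/or deepbooru tagger if captioning was
# requested, runs the actual work in preprocess_work(), and makes sure the models are
# unloaded again in the finally block even if preprocessing fails.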
def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
    try:
        if process_caption:
            shared.interrogator.load()

        if process_caption_deepbooru:
            deepbooru.model.start()

        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)

    finally:
        if process_caption:
            shared.interrogator.send_blip_to_ram()

        if process_caption_deepbooru:
            deepbooru.model.stop()
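

# List the file names in the source directory.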
def listfiles(dirname):
    return os.listdir(dirname)
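

# Per-run settings shared between preprocess_work() and the save helpers below;
# subindex numbers the images saved for the current source file.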
class PreprocessParams:
    src = None
    dstdir = None
    subindex = 0
    flip = False
    process_caption = False
    process_caption_deepbooru = False
    preprocess_txt_action = None
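

# Generate a caption (BLIP and/or deepbooru) if requested, save the image as
# <index>-<subindex>-<original name>.png, merge the generated caption with any
# pre-existing one according to preprocess_txt_action ('prepend', 'append' or 'copy'),
# and write the result to a matching .txt file.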
def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
    caption = ""

    if params.process_caption:
        caption += shared.interrogator.generate_caption(image)

    if params.process_caption_deepbooru:
        if caption:
            caption += ", "
        caption += deepbooru.model.tag_multi(image)

    filename_part = params.src
    filename_part = os.path.splitext(filename_part)[0]
    filename_part = os.path.basename(filename_part)

    basename = f"{index:05}-{params.subindex}-{filename_part}"
    image.save(os.path.join(params.dstdir, f"{basename}.png"))

    if params.preprocess_txt_action == 'prepend' and existing_caption:
        caption = f"{existing_caption} {caption}"
    elif params.preprocess_txt_action == 'append' and existing_caption:
        caption = f"{caption} {existing_caption}"
    elif params.preprocess_txt_action == 'copy' and existing_caption:
        caption = existing_caption

    caption = caption.strip()

    if caption:
        with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
            file.write(caption)

    params.subindex += 1
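

# Save the image and, if flipping is enabled, a horizontally mirrored copy as well.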
def save_pic(image, index, params, existing_caption=None):
    save_pic_with_caption(image, index, params, existing_caption=existing_caption)

    if params.flip:
        save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)
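

# Scale the image so the target rectangle spans its short side, then yield overlapping
# crops along the long side; inverse_xy swaps the roles of width and height for
# landscape images.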
def split_pic(image, inverse_xy, width, height, overlap_ratio):
    if inverse_xy:
        from_w, from_h = image.height, image.width
        to_w, to_h = height, width
    else:
        from_w, from_h = image.width, image.height
        to_w, to_h = width, height
    h = from_h * to_w // from_w
    if inverse_xy:
        image = image.resize((h, to_w))
    else:
        image = image.resize((to_w, h))

    split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
    y_step = (h - to_h) / (split_count - 1)
    for i in range(split_count):
        y = int(y_step * i)
        if inverse_xy:
            splitted = image.crop((y, 0, y + to_h, to_w))
        else:
            splitted = image.crop((0, y, to_w, y + to_h))
        yield splitted


# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
def center_crop(image: Image, w: int, h: int):
    iw, ih = image.size
    if ih / h < iw / w:
        sw = w * ih / h
        box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
    else:
        sh = h * iw / w
        box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
    return image.resize((w, h), Image.Resampling.LANCZOS, box)
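

# Search a 64-pixel grid of candidate sizes between mindim and maxdim whose area lies in
# [minarea, maxarea] and whose aspect ratio is within the error threshold of the source
# image; pick the candidate that maximizes area or minimizes aspect-ratio error
# (depending on objective) and center-crop to it. Returns None if no candidate fits.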
def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
    iw, ih = image.size
    err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h))
    wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64)
              if minarea <= w * h <= maxarea and err(w, h) <= threshold),
             key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1],
             default=None
             )
    return wh and center_crop(image, *wh)
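

# The actual preprocessing loop: read every image in the source directory, apply the
# selected split / focal-crop / multicrop / keep-original / resize steps, and save the
# results together with their captions into the destination directory.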
def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
    width = process_width
    height = process_height
    src = os.path.abspath(process_src)
    dst = os.path.abspath(process_dst)
    split_threshold = max(0.0, min(1.0, split_threshold))
    overlap_ratio = max(0.0, min(0.9, overlap_ratio))

    assert src != dst, 'same directory specified as source and destination'

    os.makedirs(dst, exist_ok=True)

    files = listfiles(src)

    shared.state.job = "preprocess"
    shared.state.textinfo = "Preprocessing..."
    shared.state.job_count = len(files)

    params = PreprocessParams()
    params.dstdir = dst
    params.flip = process_flip
    params.process_caption = process_caption
    params.process_caption_deepbooru = process_caption_deepbooru
    params.preprocess_txt_action = preprocess_txt_action

    pbar = tqdm.tqdm(files)
    for index, imagefile in enumerate(pbar):
        params.subindex = 0
        filename = os.path.join(src, imagefile)
        try:
            img = Image.open(filename)
            img = ImageOps.exif_transpose(img)
            img = img.convert("RGB")
        except Exception:
            # skip files that cannot be opened as images
            continue

        description = f"Preprocessing [Image {index}/{len(files)}]"
        pbar.set_description(description)
        shared.state.textinfo = description

        params.src = filename

        # pick up a pre-existing caption from a .txt file next to the image, if any
        existing_caption = None
        existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
        if os.path.exists(existing_caption_filename):
            with open(existing_caption_filename, 'r', encoding="utf8") as file:
                existing_caption = file.read()

        if shared.state.interrupted:
            break

        # ratio compares the image's aspect ratio to the target's; values well below 1.0
        # mean the image is elongated relative to the target and a candidate for splitting
        if img.height > img.width:
            ratio = (img.width * height) / (img.height * width)
            inverse_xy = False
        else:
            ratio = (img.height * width) / (img.width * height)
            inverse_xy = True

        process_default_resize = True

        if process_split and ratio < 1.0 and ratio <= split_threshold:
            for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
                save_pic(splitted, index, params, existing_caption=existing_caption)
            process_default_resize = False

        if process_focal_crop and img.height != img.width:

            dnn_model_path = None
            try:
                dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
            except Exception as e:
                print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)

            autocrop_settings = autocrop.Settings(
                crop_width=width,
                crop_height=height,
                face_points_weight=process_focal_crop_face_weight,
                entropy_points_weight=process_focal_crop_entropy_weight,
                corner_points_weight=process_focal_crop_edges_weight,
                annotate_image=process_focal_crop_debug,
                dnn_model_path=dnn_model_path,
            )
            for focal in autocrop.crop_image(img, autocrop_settings):
                save_pic(focal, index, params, existing_caption=existing_caption)
            process_default_resize = False

        if process_multicrop:
            cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
            if cropped is not None:
                save_pic(cropped, index, params, existing_caption=existing_caption)
            else:
                print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
            process_default_resize = False

        if process_keep_original_size:
            save_pic(img, index, params, existing_caption=existing_caption)
            process_default_resize = False

        if process_default_resize:
            img = images.resize_image(1, img, width, height)
            save_pic(img, index, params, existing_caption=existing_caption)

        shared.state.nextjob()