cd.py 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454
  1. from __future__ import annotations
  2. import importlib
  3. from codecs import IncrementalDecoder
  4. from collections import Counter
  5. from functools import lru_cache
  6. from typing import Counter as TypeCounter
  7. from .constant import (
  8. FREQUENCIES,
  9. KO_NAMES,
  10. LANGUAGE_SUPPORTED_COUNT,
  11. TOO_SMALL_SEQUENCE,
  12. ZH_NAMES,
  13. _FREQUENCIES_SET,
  14. _FREQUENCIES_RANK,
  15. )
  16. from .md import is_suspiciously_successive_range
  17. from .models import CoherenceMatches
  18. from .utils import (
  19. is_accentuated,
  20. is_latin,
  21. is_multi_byte_encoding,
  22. is_unicode_range_secondary,
  23. unicode_range,
  24. )
  25. def encoding_unicode_range(iana_name: str) -> list[str]:
  26. """
  27. Return associated unicode ranges in a single byte code page.
  28. """
  29. if is_multi_byte_encoding(iana_name):
  30. raise OSError( # Defensive:
  31. "Function not supported on multi-byte code page"
  32. )
  33. decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder
  34. p: IncrementalDecoder = decoder(errors="ignore")
  35. seen_ranges: dict[str, int] = {}
  36. character_count: int = 0
  37. for i in range(0x40, 0xFF):
  38. chunk: str = p.decode(bytes([i]))
  39. if chunk:
  40. character_range: str | None = unicode_range(chunk)
  41. if character_range is None:
  42. continue
  43. if is_unicode_range_secondary(character_range) is False:
  44. if character_range not in seen_ranges:
  45. seen_ranges[character_range] = 0
  46. seen_ranges[character_range] += 1
  47. character_count += 1
  48. return sorted(
  49. [
  50. character_range
  51. for character_range in seen_ranges
  52. if seen_ranges[character_range] / character_count >= 0.15
  53. ]
  54. )
  55. def unicode_range_languages(primary_range: str) -> list[str]:
  56. """
  57. Return inferred languages used with a unicode range.
  58. """
  59. languages: list[str] = []
  60. for language, characters in FREQUENCIES.items():
  61. for character in characters:
  62. if unicode_range(character) == primary_range:
  63. languages.append(language)
  64. break
  65. return languages
  66. @lru_cache()
  67. def encoding_languages(iana_name: str) -> list[str]:
  68. """
  69. Single-byte encoding language association. Some code page are heavily linked to particular language(s).
  70. This function does the correspondence.
  71. """
  72. unicode_ranges: list[str] = encoding_unicode_range(iana_name)
  73. primary_range: str | None = None
  74. for specified_range in unicode_ranges:
  75. if "Latin" not in specified_range:
  76. primary_range = specified_range
  77. break
  78. if primary_range is None:
  79. return ["Latin Based"]
  80. return unicode_range_languages(primary_range)
  81. @lru_cache()
  82. def mb_encoding_languages(iana_name: str) -> list[str]:
  83. """
  84. Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
  85. This function does the correspondence.
  86. """
  87. if (
  88. iana_name.startswith("shift_")
  89. or iana_name.startswith("iso2022_jp")
  90. or iana_name.startswith("euc_j")
  91. or iana_name == "cp932"
  92. ):
  93. return ["Japanese"]
  94. if iana_name.startswith("gb") or iana_name in ZH_NAMES:
  95. return ["Chinese"]
  96. if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
  97. return ["Korean"]
  98. return []
  99. @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
  100. def get_target_features(language: str) -> tuple[bool, bool]:
  101. """
  102. Determine main aspects from a supported language if it contains accents and if is pure Latin.
  103. """
  104. target_have_accents: bool = False
  105. target_pure_latin: bool = True
  106. for character in FREQUENCIES[language]:
  107. if not target_have_accents and is_accentuated(character):
  108. target_have_accents = True
  109. if target_pure_latin and is_latin(character) is False:
  110. target_pure_latin = False
  111. return target_have_accents, target_pure_latin
  112. def alphabet_languages(
  113. characters: list[str], ignore_non_latin: bool = False
  114. ) -> list[str]:
  115. """
  116. Return associated languages associated to given characters.
  117. """
  118. languages: list[tuple[str, float]] = []
  119. characters_set: frozenset[str] = frozenset(characters)
  120. source_have_accents = any(is_accentuated(character) for character in characters)
  121. for language, language_characters in FREQUENCIES.items():
  122. target_have_accents, target_pure_latin = get_target_features(language)
  123. if ignore_non_latin and target_pure_latin is False:
  124. continue
  125. if target_have_accents is False and source_have_accents:
  126. continue
  127. character_count: int = len(language_characters)
  128. character_match_count: int = len(_FREQUENCIES_SET[language] & characters_set)
  129. ratio: float = character_match_count / character_count
  130. if ratio >= 0.2:
  131. languages.append((language, ratio))
  132. languages = sorted(languages, key=lambda x: x[1], reverse=True)
  133. return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(
    language: str, ordered_characters: list[str]
) -> float:
    """
    Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
    Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)

    :param language: Key into FREQUENCIES.
    :param ordered_characters: Characters sorted from most to least frequent in the analysed sample.
    :return: Ratio of characters whose rank is "approved" against the language's frequency table.
    :raises ValueError: If ``language`` is not present in FREQUENCIES.
    """
    if language not in FREQUENCIES:
        raise ValueError(f"{language} not available")  # Defensive:

    character_approved_count: int = 0
    frequencies_language_set: frozenset[str] = _FREQUENCIES_SET[language]
    lang_rank: dict[str, int] = _FREQUENCIES_RANK[language]

    ordered_characters_count: int = len(ordered_characters)
    target_language_characters_count: int = len(FREQUENCIES[language])

    # Alphabets above 26 letters (e.g. syllabaries) get a looser rank tolerance below.
    large_alphabet: bool = target_language_characters_count > 26

    # Scale factor mapping a rank in the sample onto a rank in the language table.
    expected_projection_ratio: float = (
        target_language_characters_count / ordered_characters_count
    )

    # Pre-built rank dict for ordered_characters (avoids repeated list slicing).
    ordered_rank: dict[str, int] = {
        char: rank for rank, char in enumerate(ordered_characters)
    }
    # Pre-compute characters common to both orderings.
    # Avoids repeated `c in ordered_rank` dict lookups in the inner counts.
    common_chars: list[tuple[int, int]] = [
        (lr, ordered_rank[c]) for c, lr in lang_rank.items() if c in ordered_rank
    ]
    # Pre-extract lr and orr arrays for faster iteration in the inner loop.
    # Plain integer loops with local arrays are much faster under mypyc than
    # generator expression sums over a list of tuples.
    common_count: int = len(common_chars)
    common_lr: list[int] = [p[0] for p in common_chars]
    common_orr: list[int] = [p[1] for p in common_chars]

    for character, character_rank in zip(
        ordered_characters, range(0, ordered_characters_count)
    ):
        # Characters unknown to the language cannot contribute.
        if character not in frequencies_language_set:
            continue

        character_rank_in_language: int = lang_rank[character]
        # Where this sample rank "should" land in the language's own table.
        character_rank_projection: int = int(character_rank * expected_projection_ratio)

        # Small alphabet: reject when the projected rank is off by more than 4 positions.
        if (
            large_alphabet is False
            and abs(character_rank_projection - character_rank_in_language) > 4
        ):
            continue

        # Large alphabet: approve outright when within a third of the table size.
        if (
            large_alphabet is True
            and abs(character_rank_projection - character_rank_in_language)
            < target_language_characters_count / 3
        ):
            character_approved_count += 1
            continue

        # Count how many characters appear "before" in both orderings,
        # and how many appear "at or after" in both orderings.
        # Single pass over pre-extracted arrays — much faster under mypyc
        # than two generator expression sums.
        before_match_count: int = 0
        after_match_count: int = 0
        for i in range(common_count):
            lr_i: int = common_lr[i]
            orr_i: int = common_orr[i]
            if lr_i < character_rank_in_language:
                if orr_i < character_rank:
                    before_match_count += 1
            else:
                if orr_i >= character_rank:
                    after_match_count += 1

        # Number of language-table entries ranked at or after this character.
        after_len: int = target_language_characters_count - character_rank_in_language

        if character_rank_in_language == 0 and before_match_count <= 4:
            character_approved_count += 1
            continue

        if after_len == 0 and after_match_count <= 4:
            character_approved_count += 1
            continue

        # Approve when at least 40 % of neighbours agree on either side.
        if (
            character_rank_in_language > 0
            and before_match_count / character_rank_in_language >= 0.4
        ) or (after_len > 0 and after_match_count / after_len >= 0.4):
            character_approved_count += 1
            continue

    return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> list[str]:
    """
    Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
    Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
    One containing the latin letters and the other hebrew.

    :param decoded_sequence: Any decoded text.
    :return: One lowercased string per discovered layer; non-alphabetic characters are dropped.
    """
    layers: dict[str, list[str]] = {}
    # Fast path: track single-layer key to skip dict iteration for single-script text.
    single_layer_key: str | None = None
    multi_layer: bool = False
    # Cache the last character_range and its resolved layer to avoid repeated
    # is_suspiciously_successive_range calls for consecutive same-range chars.
    prev_character_range: str | None = None
    prev_layer_target: str | None = None

    for character in decoded_sequence:
        # Only alphabetic characters participate in layer separation.
        if character.isalpha() is False:
            continue

        # ASCII fast-path: a-z and A-Z are always "Basic Latin".
        # Avoids unicode_range() function call overhead for the most common case.
        character_ord: int = ord(character)
        if character_ord < 128:
            character_range: str | None = "Basic Latin"
        else:
            character_range = unicode_range(character)
            if character_range is None:
                continue

        # Fast path: same range as previous character → reuse cached layer target.
        if character_range == prev_character_range:
            if prev_layer_target is not None:
                layers[prev_layer_target].append(character)
                continue

        layer_target_range: str | None = None

        # Try to merge this character into an existing layer whose range is
        # not "suspiciously successive" with the character's own range.
        if multi_layer:
            for discovered_range in layers:
                if (
                    is_suspiciously_successive_range(discovered_range, character_range)
                    is False
                ):
                    layer_target_range = discovered_range
                    break
        elif single_layer_key is not None:
            # Single existing layer: one direct check, no dict iteration.
            if (
                is_suspiciously_successive_range(single_layer_key, character_range)
                is False
            ):
                layer_target_range = single_layer_key

        # No compatible layer found: the character starts (or names) its own.
        if layer_target_range is None:
            layer_target_range = character_range

        if layer_target_range not in layers:
            layers[layer_target_range] = []
            if single_layer_key is None:
                single_layer_key = layer_target_range
            else:
                multi_layer = True

        layers[layer_target_range].append(character)

        # Cache for next iteration
        prev_character_range = character_range
        prev_layer_target = layer_target_range

    return ["".join(chars).lower() for chars in layers.values()]
  275. def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches:
  276. """
  277. This function merge results previously given by the function coherence_ratio.
  278. The return type is the same as coherence_ratio.
  279. """
  280. per_language_ratios: dict[str, list[float]] = {}
  281. for result in results:
  282. for sub_result in result:
  283. language, ratio = sub_result
  284. if language not in per_language_ratios:
  285. per_language_ratios[language] = [ratio]
  286. continue
  287. per_language_ratios[language].append(ratio)
  288. merge = [
  289. (
  290. language,
  291. round(
  292. sum(per_language_ratios[language]) / len(per_language_ratios[language]),
  293. 4,
  294. ),
  295. )
  296. for language in per_language_ratios
  297. ]
  298. return sorted(merge, key=lambda x: x[1], reverse=True)
  299. def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
  300. """
  301. We shall NOT return "English—" in CoherenceMatches because it is an alternative
  302. of "English". This function only keeps the best match and remove the em-dash in it.
  303. """
  304. index_results: dict[str, list[float]] = dict()
  305. for result in results:
  306. language, ratio = result
  307. no_em_name: str = language.replace("—", "")
  308. if no_em_name not in index_results:
  309. index_results[no_em_name] = []
  310. index_results[no_em_name].append(ratio)
  311. if any(len(index_results[e]) > 1 for e in index_results):
  312. filtered_results: CoherenceMatches = []
  313. for language in index_results:
  314. filtered_results.append((language, max(index_results[language])))
  315. return filtered_results
  316. return results
  317. @lru_cache(maxsize=2048)
  318. def coherence_ratio(
  319. decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None
  320. ) -> CoherenceMatches:
  321. """
  322. Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
  323. A layer = Character extraction by alphabets/ranges.
  324. """
  325. results: list[tuple[str, float]] = []
  326. ignore_non_latin: bool = False
  327. sufficient_match_count: int = 0
  328. lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
  329. if "Latin Based" in lg_inclusion_list:
  330. ignore_non_latin = True
  331. lg_inclusion_list.remove("Latin Based")
  332. for layer in alpha_unicode_split(decoded_sequence):
  333. sequence_frequencies: TypeCounter[str] = Counter(layer)
  334. most_common = sequence_frequencies.most_common()
  335. character_count: int = len(layer)
  336. if character_count <= TOO_SMALL_SEQUENCE:
  337. continue
  338. popular_character_ordered: list[str] = [c for c, o in most_common]
  339. for language in lg_inclusion_list or alphabet_languages(
  340. popular_character_ordered, ignore_non_latin
  341. ):
  342. ratio: float = characters_popularity_compare(
  343. language, popular_character_ordered
  344. )
  345. if ratio < threshold:
  346. continue
  347. elif ratio >= 0.8:
  348. sufficient_match_count += 1
  349. results.append((language, round(ratio, 4)))
  350. if sufficient_match_count >= 3:
  351. break
  352. return sorted(
  353. filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
  354. )