new-words

annotate new-words.py @ 55:2a1a25e61872

new-words.py can be used without the wrapper; several features are not implemented yet
author Igor Chubin <igor@chub.in>
date Thu Nov 03 15:53:59 2011 +0100 (2011-11-03)
parents e25de9ea9184
children 3682038403ad
rev   line source
igor@37 1 #!/usr/bin/env python
igor@38 2 # -*- coding: utf-8 -*-
igor@37 3
igor@40 4 from __future__ import with_statement
igor@38 5 import codecs
igor@49 6 import difflib
igor@38 7 import logging
igor@38 8 import os
igor@37 9 import optparse
igor@38 10 import re
igor@38 11 import subprocess
igor@38 12 import sys
igor@38 13 import Stemmer
igor@54 14 import tempfile
igor@42 15 try:
igor@42 16 import psyco
igor@42 17 psyco.full()
igor@42 18 except ImportError:
igor@42 19 pass
igor@38 20
igor@38 21 config = {
igor@38 22 'config_directory': os.environ['HOME'] + '/.new-words',
igor@38 23 'language': 'en',
igor@38 24 }
igor@38 25
igor@38 26 logging.basicConfig(filename='/tmp/new-words-py.log', level=logging.DEBUG)
igor@38 27
igor@38 28 class Normalizator:
igor@38 29 def __init__(self, language, linked_words={}):
igor@38 30 stemmer_algorithm = {
igor@38 31 'de' : 'german',
igor@38 32 'en' : 'english',
igor@51 33 'es' : 'spanish',
igor@38 34 'ru' : 'russian',
igor@51 35 'it' : 'italian',
igor@38 36 'uk' : 'ukrainian',
igor@38 37 }
igor@38 38 self.stemmer = Stemmer.Stemmer(stemmer_algorithm[language])
igor@38 39 self.linked_words = linked_words
igor@38 40
igor@38 41 def normalize(self, word):
igor@38 42 word_chain = []
igor@38 43 while word in self.linked_words and not word in word_chain:
igor@38 44 word_chain.append(word)
igor@38 45 word = self.linked_words[word]
igor@38 46 return self.stemmer.stemWord(word.lower())
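# Illustrative example (assuming linked_words = {u'ging': u'gehen'} for German):
# normalize(u'ging') first follows the link to u'gehen' and then returns its
# Snowball stem in lower case.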
igor@37 47
igor@47 48 def best_word_from_group(self, wordpairs_group):
igor@47 49 """Returns the word that is the most relevant to the wordpairs_group.
igor@47 50
igor@47 51 At the moment: returns the shortest word (ties broken by word frequency)"""
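# Illustrative example: for wordpairs_group = [(3, u'Hauses'), (5, u'Haus')]
# the shortest word, u'Haus', is returned.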
igor@49 52
igor@49 53 def f(x, y):
igor@49 54 return difflib.SequenceMatcher(
igor@49 55 None,
igor@49 56 #(x[-2:] == 'en' and x[:-2].lower() or x.lower()),
igor@49 57 x.lower(),
igor@49 58 y.lower()).ratio()
igor@47 59
igor@47 60 minimal_length = min(len(pair[1]) for pair in wordpairs_group)
igor@49 61 best_match = list(x[1] for x in sorted(
igor@47 62 (x for x in wordpairs_group if len(x[1]) == minimal_length),
igor@47 63 key=lambda x:x[0],
igor@47 64 reverse=True))[0]
igor@47 65
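# NOTE: this early return short-circuits the dictionary-based refinement below,
# so the rest of this method is currently dead code.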
igor@51 66 return best_match
igor@51 67
igor@49 68 suggestions = self.dictionary_suggestions(best_match)
igor@49 69 if len(suggestions) == 1:
igor@49 70 return best_match
igor@49 71
igor@49 72 verb = False
igor@49 73 corrected_best_match = best_match
igor@49 74 if best_match[-2:] == 'et':
igor@49 75 word = best_match[:-1]+"n"
igor@49 76 sugg = self.dictionary_suggestions(word)
igor@49 77 if len(sugg) == 1:
igor@49 78 return word
igor@49 79 suggestions += sugg
igor@49 80 corrected_best_match = word
igor@49 81 corrected_best_match = best_match[:-2]
igor@49 82 verb = True
igor@49 83
igor@49 84 if best_match[-1] == 't':
igor@49 85 word = best_match[:-1]+"en"
igor@49 86 sugg = self.dictionary_suggestions(word)
igor@49 87 if len(sugg) == 1:
igor@49 88 return word
igor@49 89 suggestions += sugg
igor@49 90 corrected_best_match = best_match[:-1]
igor@49 91 verb = True
igor@49 92
igor@49 93 if corrected_best_match[0].lower() == corrected_best_match[0]:
igor@49 94 suggestions = [ x for x in suggestions
igor@49 95 if x[0].lower() == x[0] ]
igor@49 96
igor@49 97 if suggestions == []:
igor@49 98 return best_match+"_"
igor@49 99 return best_match+" "+(" ".join(
igor@49 100 sorted(
igor@49 101 suggestions,
igor@49 102 key = lambda x: f(x, corrected_best_match),
igor@49 103 reverse = True
igor@49 104 )
igor@49 105 )
igor@49 106 )
igor@49 107
igor@49 108 def dictionary_suggestions(self, word):
igor@49 109 return [
igor@49 110 x.decode('utf-8').rstrip('\n')
igor@49 111 for x
igor@49 112 in subprocess.Popen(
igor@49 113 ["de-variants", word],
igor@49 114 stdout=subprocess.PIPE
igor@49 115 ).stdout.readlines() ]
igor@49 116
igor@49 117
igor@37 118 parser = optparse.OptionParser()
igor@37 119
igor@37 120 parser.add_option(
igor@37 121 "-a", "--no-marks",
igor@55 122 help="don't add marks (and don't save marks added by user) [NOT IMPLEMENTED YET]",
igor@37 123 action="store_true",
igor@37 124 dest="no_marks")
igor@37 125
igor@37 126 parser.add_option(
igor@37 127 "-c", "--compressed",
igor@37 128 help="show compressed wordlist: one word per group",
igor@37 129 action="store_true",
igor@37 130 dest="compressed")
igor@37 131
igor@37 132 parser.add_option(
igor@37 133 "-k", "--known-words",
igor@37 134 help="put higher words that are similar to the known words (only for English)",
igor@37 135 action="store_true",
igor@37 136 dest="compressed")
igor@37 137
igor@37 138 parser.add_option(
igor@37 139 "-l", "--language",
igor@37 140 help="specify language of text",
igor@37 141 action="store",
igor@37 142 dest="language")
igor@37 143
igor@37 144 parser.add_option(
igor@54 145 "-f", "--allowed-words",
igor@54 146 help="file with list of allowed words (words that will be shown in the output)",
igor@54 147 action="store",
igor@54 148 dest="allowed_words")
igor@54 149
igor@54 150 parser.add_option(
igor@55 151 "-G", "--words-grouping",
igor@55 152 help="turn off word grouping",
igor@55 153 action="store_true",
igor@55 154 dest="no_words_grouping")
igor@55 155
igor@55 156 parser.add_option(
igor@54 157 "-X", "--function",
igor@38 158 help="filter through subsystem [INTERNAL]",
igor@38 159 action="store",
igor@38 160 dest="function")
igor@38 161
igor@38 162 parser.add_option(
igor@37 163 "-m", "--merge-tag",
igor@55 164 help="merge words tagged with specified tag into the main vocabulary [NOT IMPLEMENTED YET]",
igor@37 165 action="store",
igor@37 166 dest="merge_tag")
igor@37 167
igor@37 168 parser.add_option(
igor@37 169 "-M", "--merge-tagged",
igor@55 170 help="merge words tagged with ANY tag into the main vocabulary [NOT IMPLEMENTED YET]",
igor@37 171 action="store_true",
igor@37 172 dest="merge_tagged")
igor@37 173
igor@37 174 parser.add_option(
igor@37 175 "-n", "--non-interactive",
igor@37 176 help="non-interactive mode (don't run vi)",
igor@37 177 action="store_true",
igor@37 178 dest="non_interactive")
igor@37 179
igor@37 180 parser.add_option(
igor@37 181 "-N", "--no-filter",
igor@37 182 help="switch off known words filtering",
igor@37 183 action="store_true",
igor@37 184 dest="no_filter")
igor@37 185
igor@37 186 parser.add_option(
igor@37 187 "-p", "--pages",
igor@37 188 help="work with specified pages only (pages = start-stop/total )",
igor@37 189 action="store",
igor@37 190 dest="pages")
igor@37 191
igor@37 192 parser.add_option(
igor@48 193 "-d", "--delete-tag",
igor@48 194 help="delete subvocabulary of specified tag",
igor@37 195 action="store",
igor@48 196 dest="delete_tag")
igor@37 197
igor@37 198 parser.add_option(
igor@55 199 "-r", "--show-range",
igor@55 200 help="show only words specified number of words",
igor@55 201 action="store",
igor@55 202 dest="show_range")
igor@55 203
igor@55 204 parser.add_option(
igor@54 205 "-R", "--show-range-percentage",
igor@54 206 help="show only words that cover specified percentage of the text, skip the rest",
igor@54 207 action="store",
igor@54 208 dest="show_range_percentage")
igor@54 209
igor@54 210 parser.add_option(
igor@37 211 "-s", "--text-stats",
igor@37 212 help="show the text statistics (percentage of known words and so on) and exit",
igor@37 213 action="store_true",
igor@37 214 dest="text_stats")
igor@37 215
igor@37 216 parser.add_option(
igor@37 217 "-S", "--voc-stats",
igor@55 218 help="show your vocabulary statistics (number of words and word groups) [NOT IMPLEMENTED YET]",
igor@37 219 action="store_true",
igor@37 220 dest="voc_stats")
igor@37 221
igor@37 222 parser.add_option(
igor@37 223 "-t", "--tag",
igor@37 224 help="tag known words with tag",
igor@37 225 action="store",
igor@37 226 dest="tag")
igor@37 227
igor@37 228 parser.add_option(
igor@37 229 "-T", "--show-tags",
igor@37 230 help="tag known words with tag",
igor@37 231 action="store_true",
igor@37 232 dest="show_tags")
igor@37 233
igor@37 234 parser.add_option(
igor@37 235 "-2", "--two-words",
igor@37 236 help="find 2 words' sequences",
igor@37 237 action="store_true",
igor@37 238 dest="two_words")
igor@37 239
igor@37 240 parser.add_option(
igor@37 241 "-3", "--three-words",
igor@37 242 help="find 3 words' sequences",
igor@37 243 action="store_true",
igor@37 244 dest="three_words")
igor@37 245
igor@38 246 def readlines_from_file(filename):
igor@38 247 res = []
igor@38 248 with codecs.open(filename, "r", "utf-8") as f:
igor@38 249 for line in f.readlines():
igor@38 250 res += [line]
igor@38 251 return res
igor@38 252
igor@54 253 def readlines_from_url(url):
igor@54 254 return [x.decode('utf-8') for x in
igor@54 255 subprocess.Popen(
igor@54 256 "lynx -dump '{url}' | perl -p -e 's@http://[a-zA-Z&_.:/0-9%?=,#+()\[\]~-]*@@'".format(url=url),
igor@54 257 shell = True,
igor@54 258 stdout = subprocess.PIPE,
igor@54 259 stderr = subprocess.STDOUT
igor@54 260 ).communicate()[0].split('\n')
igor@54 261 ]
igor@54 262
igor@38 263 def readlines_from_stdin():
igor@38 264 return codecs.getreader("utf-8")(sys.stdin).readlines()
igor@38 265
igor@38 266 def words_from_line(line):
igor@38 267 line = line.rstrip('\n')
igor@38 268 #return re.split('(?:\s|[*\r,.:#@()+=<>$;"?!|\[\]^%&~{}«»–])+', line)
igor@38 269 #return re.split('[^a-zA-ZäöëüßÄËÖÜß]+', line)
igor@44 270 return re.compile("(?!['_])(?:\W)+", flags=re.UNICODE).split(line)
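# The split keeps apostrophes inside words: an illustrative input u"don't stop"
# is split into [u"don't", u"stop"].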
igor@38 271
igor@44 272 def get_words(lines, group_by=[1]):
igor@38 273 """
igor@38 274 Returns a dictionary of the words found in the lines:
igor@38 275 word => number of occurrences
igor@38 276 """
igor@38 277 result = {}
igor@44 278 (a, b, c) = ("", "", "")
igor@38 279 for line in lines:
igor@38 280 words = words_from_line(line)
igor@38 281 for word in words:
igor@41 282 if re.match('[0-9]*$', word):
igor@41 283 continue
igor@38 284 result.setdefault(word, 0)
igor@38 285 result[word] += 1
igor@44 286 if 2 in group_by and a != "" and b != "":
igor@44 287 w = "%s_%s" % (a,b)
igor@44 288 result.setdefault(w, 0)
igor@44 289 result[w] += 1
igor@44 290 if 3 in group_by and not "" in [a,b,c]:
igor@44 291 w = "%s_%s_%s" % (a,b,c)
igor@44 292 result.setdefault(w, 0)
igor@44 293 result[w] += 1
igor@44 294 (a,b,c) = (b, c, word)
igor@44 295
igor@44 296 logging.debug(result)
igor@38 297 return result
igor@38 298
igor@54 299 def voc_filename():
igor@54 300 return "%s/%s.txt"%(config['config_directory'], config['language'])
igor@54 301
igor@38 302 def load_vocabulary():
igor@54 303 return get_words(readlines_from_file(voc_filename()))
igor@38 304
igor@38 305 def notes_filenames():
igor@38 306 return ["%s/notes-%s.txt"%(config['config_directory'], config['language'])]
igor@38 307
igor@38 308 def load_notes(files):
igor@38 309 notes = {}
igor@38 310 for filename in files:
igor@39 311 with codecs.open(filename, "r", "utf-8") as f:
igor@38 312 for line in f.readlines():
igor@38 313 (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
igor@38 314 notes.setdefault(word, {})
igor@38 315 notes[word][filename] = note
igor@38 316 return notes
igor@38 317
igor@39 318 def add_notes(lines, notes):
igor@39 319 notes_filename = notes_filenames()[0]
igor@39 320 result = []
igor@39 321 for line in lines:
igor@39 322 if line.startswith('#'):
igor@39 323 result += [line]
igor@39 324 else:
igor@39 325 match_object = re.search('^\s*\S+\s*(\S+)', line)
igor@39 326 if match_object:
igor@39 327 word = match_object.group(1)
igor@39 328 if word in notes:
igor@39 329 if notes_filename in notes[word]:
igor@39 330 line = line.rstrip('\n')
igor@39 331 line = "%-30s %s\n" % (line, notes[word][notes_filename])
igor@39 332 result += [line]
igor@39 333 else:
igor@39 334 result += [line]
igor@39 335 else:
igor@39 336 result += [line]
igor@39 337 return result
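# Illustrative example: a wordlist line u"        12 gehen\n" with a saved note
# for u'gehen' becomes the same line padded to 30 columns with the note appended.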
igor@39 338
igor@39 339 def remove_notes(lines, notes_group):
igor@39 340 notes_filename = notes_filenames()[0]
igor@39 341 notes = {}
igor@39 342 for k in notes_group.keys():
igor@39 343 if notes_filename in notes_group[k]:
igor@39 344 notes[k] = notes_group[k][notes_filename]
igor@39 345
igor@39 346 result = []
igor@39 347 for line in lines:
igor@39 348 line = line.rstrip('\n')
igor@39 349 match_object = re.match('(\s+)(\S+)(\s+)(\S+)(\s+)(.*)', line)
igor@39 350 if match_object:
igor@39 351 result.append("".join([
igor@39 352 match_object.group(1),
igor@39 353 match_object.group(2),
igor@39 354 match_object.group(3),
igor@39 355 match_object.group(4),
igor@39 356 "\n"
igor@39 357 ]))
igor@39 358 notes[match_object.group(4)] = match_object.group(6)
igor@39 359 else:
igor@39 360 result.append(line+"\n")
igor@39 361
igor@39 362 save_notes(notes_filename, notes)
igor@39 363 return result
igor@39 364
igor@39 365 def save_notes(filename, notes):
igor@39 366 lines = []
igor@39 367 saved_words = []
igor@39 368 with codecs.open(filename, "r", "utf-8") as f:
igor@39 369 for line in f.readlines():
igor@39 370 (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
igor@39 371 if word in notes:
igor@39 372 line = "%-29s %s\n" % (word, notes[word])
igor@39 373 saved_words.append(word)
igor@39 374 lines.append(line)
igor@39 375 for word in [x for x in notes.keys() if not x in saved_words]:
igor@39 376 line = "%-29s %s\n" % (word, notes[word])
igor@39 377 lines.append(line)
igor@39 378
igor@39 379 with codecs.open(filename, "w", "utf-8") as f:
igor@39 380 for line in lines:
igor@39 381 f.write(line)
igor@39 382
igor@39 383
igor@38 384 def substract_dictionary(dict1, dict2):
igor@38 385 """
igor@38 386 returns dict1 - dict2
igor@38 387 """
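# Illustrative example: substract_dictionary({u'der': 5, u'Haus': 2}, {u'der': 1})
# returns {u'Haus': 2}; keys present in dict2 are dropped regardless of count.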
igor@38 388 result = {}
igor@38 389 for (k,v) in dict1.items():
igor@38 390 if not k in dict2:
igor@38 391 result[k] = v
igor@38 392 return result
igor@38 393
igor@38 394 def dump_words(words, filename):
igor@38 395 with codecs.open(filename, "w+", "utf-8") as f:
igor@38 396 for word in words.keys():
igor@38 397 f.write(("%s\n"%word)*words[word])
igor@38 398
igor@38 399 def error_message(text):
igor@38 400 print text
igor@38 401
igor@40 402 def find_wordgroups_weights(word_pairs, normalizator):
igor@38 403 weight = {}
igor@40 404 for (num, word) in word_pairs:
igor@38 405 normalized = normalizator.normalize(word)
igor@38 406 weight.setdefault(normalized, 0)
igor@40 407 weight[normalized] += num
igor@38 408 return weight
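# Illustrative example: if u'gehen' occurs 4 times and u'ging' (linked to
# u'gehen') occurs 2 times, their common group gets weight 6.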
igor@38 409
igor@38 410 def find_linked_words(notes):
igor@38 411 linked_words = {}
igor@38 412 for word in notes.keys():
igor@38 413 for note in notes[word].values():
igor@38 414 if "@" in note:
igor@38 415 result = re.search(r'\@(\S*)', note)
igor@38 416 if result:
igor@38 417 main_word = result.group(1)
igor@38 418 if main_word:
igor@38 419 linked_words[word] = main_word
igor@38 420 return linked_words
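# Illustrative example: a note such as u"pret. of @gehen" links the annotated
# word to u'gehen', so both end up in the same word group.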
igor@38 421
igor@40 422 def compare_word_pairs(pair1, pair2, wgw, normalizator, linked_words):
igor@40 423 (num1, word1) = pair1
igor@40 424 (num2, word2) = pair2
igor@38 425
igor@38 426 normalized_word1 = normalizator.normalize(word1)
igor@38 427 normalized_word2 = normalizator.normalize(word2)
igor@38 428
igor@38 429 cmp_res = cmp(wgw[normalized_word1], wgw[normalized_word2])
igor@38 430 if cmp_res != 0:
igor@38 431 return cmp_res
igor@38 432 else:
igor@38 433 cmp_res = cmp(normalized_word1, normalized_word2)
igor@38 434 if cmp_res != 0:
igor@38 435 return cmp_res
igor@38 436 else:
igor@38 437 return cmp(int(num1), int(num2))
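# Word pairs are compared first by the total weight of their word group,
# then by the normalized form, and finally by the word's own frequency.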
igor@38 438
igor@47 439
igor@48 440 def print_words_sorted(
igor@48 441 word_pairs,
igor@48 442 stats,
igor@48 443 normalizator,
igor@48 444 print_stats=True,
igor@48 445 stats_only=False,
igor@48 446 compressed_wordlist=False,
igor@48 447 show_range=0,
igor@48 448 show_range_percentage=0,
igor@48 449 ):
igor@54 450 result = []
igor@40 451 if stats_only:
igor@54 452 #codecs.getwriter("utf-8")(sys.stdout).write(
igor@54 453 result.append(
igor@43 454 " ".join([
igor@43 455 "%-10s" % x for x in [
igor@43 456 "LANG",
igor@43 457 "KNOWN%",
igor@43 458 "UNKNOWN%",
igor@43 459 "KNOWN",
igor@43 460 "TOTAL",
igor@43 461 "WPS",
igor@43 462 "UWPS*10"
igor@43 463 ]]) + "\n")
igor@54 464 result.append(
igor@43 465 " ".join([
igor@43 466 "%(language)-10s",
igor@43 467 "%(percentage)-10.2f",
igor@43 468 "%(percentage_unknown)-10.2f",
igor@43 469 "%(total_known)-11d"
igor@43 470 "%(total)-11d"
igor@43 471 "%(wps)-11d"
igor@43 472 "%(uwps)-11d"
igor@43 473 ]) % stats + "\n")
igor@54 474 return "".join(result)
igor@38 475
igor@40 476 if print_stats:
igor@54 477 result.append(
igor@43 478 "# %(language)s, %(percentage)-7.2f, <%(total_known)s/%(total)s>, <%(groups)s/%(words)s>\n" % stats)
igor@38 479
igor@40 480 level_lines = range(int(float(stats['percentage']))/5*5+5,95,5)+range(90,102)
igor@40 481 known = int(stats['total_known'])
igor@40 482 total = int(stats['total'])
igor@40 483 current_level = 0
igor@47 484 old_normalized_word = None
igor@47 485 words_of_this_group = []
igor@48 486 printed_words = 0
igor@40 487 for word_pair in word_pairs:
igor@47 488
igor@47 489 normalized_word = normalizator.normalize(word_pair[1])
igor@47 490 if old_normalized_word and old_normalized_word != normalized_word:
igor@47 491 if compressed_wordlist:
igor@49 492 compressed_word_pair = (
igor@49 493 sum(x[0] for x in words_of_this_group),
igor@49 494 normalizator.best_word_from_group(words_of_this_group)
igor@49 495 )
igor@54 496 result.append("%10s %s\n" % compressed_word_pair)
igor@48 497 printed_words += 1
igor@47 498 words_of_this_group = []
igor@47 499
igor@47 500 old_normalized_word = normalized_word
igor@47 501 words_of_this_group.append(word_pair)
igor@47 502
igor@47 503 if not compressed_wordlist:
igor@54 504 result.append("%10s %s\n" % word_pair)
igor@48 505 printed_words += 1
igor@47 506
igor@47 507
igor@40 508 known += word_pair[0]
igor@40 509 if 100.0*known/total >= level_lines[0]:
igor@40 510 current_level = level_lines[0]
igor@40 511 while 100.0*known/total > level_lines[0]:
igor@40 512 current_level = level_lines[0]
igor@40 513 level_lines = level_lines[1:]
igor@54 514 result.append("# %s\n" % current_level)
igor@38 515
igor@48 516 if show_range >0 and printed_words >= show_range:
igor@48 517 break
igor@48 518 if show_range_percentage >0 and 100.0*known/total >= show_range_percentage:
igor@48 519 break
igor@48 520
igor@54 521 return result
igor@39 522
igor@53 523 def parse_parts_description(parts_description):
igor@53 524 """
igor@53 525 Returns a triple (start, stop, step)
igor@53 526 based on the parts_description string.
igor@53 527 from-to/step
igor@53 528 from+delta/step
igor@53 529 """
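# Illustrative examples: "2-3/10" -> (2, 3, 10), "4/10" -> (4, 5, 10).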
igor@53 530
igor@53 531 try:
igor@53 532 (a, step) = parts_description.split("/", 1)
igor@53 533 step = int(step)
igor@53 534 start = 0
igor@53 535 stop = 0
igor@53 536 if '-' in a:
igor@53 537 (start, stop) = a.split("-", 1)
igor@53 538 start = int(start)
igor@53 539 stop = int(stop)
igor@53 540 elif '+' in a:
igor@53 541 (start, delta) = a.split("+", 1)
igor@53 542 start = int(start)
igor@53 543 stop = start + int(delta)
igor@53 544 else:
igor@53 545 start = int(a)
igor@53 546 stop = start + 1
igor@53 547 return (start, stop, step)
igor@53 548
igor@53 549 except:
igor@54 550 raise ValueError("Parts description must be in format: num[[+-]num]/num; this [%s] is incorrect" % parts_description)
igor@53 551
igor@53 552
igor@53 553 def take_part(lines, part_description = None):
igor@55 554 if part_description is None or part_description == '':
igor@53 555 return lines
igor@53 556 (start, stop, step) = parse_parts_description(part_description)
igor@53 557 n = len(lines)
igor@53 558 part_size = (1.0*n) / step
igor@53 559 result = []
igor@53 560 for i in range(n):
igor@54 561 if i >= start * part_size and i <= stop * part_size:
igor@54 562 result += [lines[i]]
igor@53 563 return result
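# Illustrative example: take_part(lines, "2-3/10") keeps roughly the slice of
# lines between 20% and 30% of the text.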
igor@53 564
igor@40 565 def filter_get_words_group_words_add_stat(args):
igor@40 566 vocabulary = load_vocabulary()
igor@40 567 notes = load_notes(notes_filenames())
igor@54 568
igor@54 569 if len(args) > 0:
igor@54 570 if 'http://' in args[0]:
igor@54 571 input_lines = readlines_from_url(args[0])
igor@54 572 else:
igor@54 573 input_lines = readlines_from_file(args[0])
igor@54 574 else:
igor@54 575 input_lines = readlines_from_stdin()
igor@54 576
igor@54 577 if len(input_lines) == 0:
igor@54 578 print >> sys.stderr, "Nothing to do, standard input is empty, exiting."
igor@54 579 sys.exit(1)
igor@54 580
igor@54 581 lines = take_part(input_lines, config.get('pages', ''))
igor@54 582
igor@54 583 (_, original_text_tempfile) = tempfile.mkstemp(prefix='new-word')
igor@54 584 with codecs.open(original_text_tempfile, "w", "utf-8") as f:
igor@54 585 f.write("".join(lines))
igor@54 586
igor@44 587 group_by = [1]
igor@48 588
igor@54 589 if 'two_words' in config:
igor@44 590 group_by.append(2)
igor@54 591 if 'three_words' in config:
igor@44 592 group_by.append(3)
igor@44 593 words = get_words(lines, group_by)
igor@43 594 stats_only = False
igor@54 595 if 'text_stats' in config:
igor@43 596 stats_only = True
igor@40 597
igor@47 598 compressed_wordlist = False
igor@54 599 if 'compressed' in config:
igor@47 600 compressed_wordlist = True
igor@47 601
igor@55 602 if 'show_range' in config:
igor@55 603 show_range = int(config['show_range'])
igor@48 604 else:
igor@48 605 show_range = 0
igor@54 606
igor@54 607 if 'show_range_percentage' in config:
igor@54 608 show_range_percentage = int(config['show_range_percentage'])
igor@48 609 else:
igor@48 610 show_range_percentage = 0
igor@48 611
igor@44 612
igor@40 613 stats = {}
igor@40 614 stats['total'] = sum(words[x] for x in words.keys())
igor@54 615 if not 'no_filter' in config:
igor@45 616 words = substract_dictionary(words, vocabulary)
igor@40 617
igor@40 618 stats['total_unknown'] = sum(words[x] for x in words.keys())
igor@40 619 stats['total_known'] = stats['total'] - stats['total_unknown']
igor@43 620 stats['percentage'] = 100.0*stats['total_known']/stats['total']
igor@43 621 stats['percentage_unknown'] = 100.0-100.0*stats['total_known']/stats['total']
igor@40 622 stats['groups'] = 0
igor@40 623 stats['words'] = len(words)
igor@43 624 stats['sentences'] = 0 #FIXME
igor@43 625 stats['wps'] = 0 #FIXME
igor@43 626 stats['uwps'] = 0 #FIXME
igor@40 627 stats['language'] = config['language']
igor@40 628
igor@40 629 linked_words = find_linked_words(notes)
igor@40 630 normalizator = Normalizator(config['language'], linked_words)
igor@40 631
igor@50 632 # filter words by allowed_words_filter
igor@54 633 if 'allowed_words' in config:
igor@54 634 allowed_words_filename = config['allowed_words']
igor@50 635 normalized_allowed_words = [
igor@50 636 normalizator.normalize(w.rstrip('\n'))
igor@50 637 for w in readlines_from_file(allowed_words_filename)
igor@50 638 ]
igor@50 639
igor@50 640 result = {}
igor@50 641 for w, wn in words.iteritems():
igor@50 642 if normalizator.normalize(w) in normalized_allowed_words:
igor@50 643 result[w] = wn
igor@50 644 words = result
igor@50 645
igor@44 646 words_with_freq = []
igor@40 647 for k in sorted(words.keys(), key=lambda k: words[k], reverse=True):
igor@44 648 words_with_freq.append((words[k], k))
igor@40 649
igor@44 650 wgw = find_wordgroups_weights(words_with_freq, normalizator)
igor@55 651 if not 'no_words_grouping' in config or not config['no_words_grouping']:
igor@45 652 words_with_freq = sorted(
igor@44 653 words_with_freq,
igor@40 654 cmp=lambda x,y:compare_word_pairs(x,y, wgw, normalizator, linked_words),
igor@40 655 reverse=True)
igor@40 656
igor@54 657 output = print_words_sorted(
igor@47 658 words_with_freq,
igor@47 659 stats,
igor@47 660 normalizator,
igor@47 661 stats_only=stats_only,
igor@48 662 compressed_wordlist=compressed_wordlist,
igor@48 663 show_range=show_range,
igor@48 664 show_range_percentage=show_range_percentage,
igor@47 665 )
igor@40 666
igor@54 667
igor@54 668 if ('non_interactive' in config or 'text_stats' in config):
igor@54 669 codecs.getwriter("utf-8")(sys.stdout).write("".join(output))
igor@54 670 else:
igor@54 671 (_, temp1) = tempfile.mkstemp(prefix='new-word')
igor@54 672 (_, temp2) = tempfile.mkstemp(prefix='new-word')
igor@54 673
igor@54 674 with codecs.open(temp1, "w", "utf-8") as f:
igor@54 675 f.write("".join(output))
igor@54 676 with codecs.open(temp2, "w", "utf-8") as f:
igor@54 677 f.write("".join(add_notes(output, notes)))
igor@54 678
igor@54 679 os.putenv('ORIGINAL_TEXT', original_text_tempfile)
igor@54 680 os.system((
igor@54 681 "vim"
igor@54 682 " -c 'setlocal spell spelllang={language}'"
igor@54 683 " -c 'set keywordprg={language}'"
igor@54 684 " -c 'set iskeyword=@,48-57,/,.,-,_,+,,,#,$,%,~,=,48-255'"
igor@54 685 " {filename}"
igor@54 686 " < /dev/tty > /dev/tty"
igor@54 687 ).format(language=config['language'], filename=temp2))
igor@54 688
igor@54 689 lines = remove_notes(readlines_from_file(temp2), notes)
igor@54 690
igor@54 691 # compare lines_before and lines_after and return deleted words
igor@54 692 lines_before = output
igor@54 693 lines_after = lines
igor@54 694 deleted_words = []
igor@54 695
igor@54 696 for line in lines_before:
igor@54 697 if line not in lines_after:
igor@54 698 line = line.strip()
igor@54 699 if ' ' in line:
igor@54 700 word = re.split('\s+', line, 1)[1]
igor@54 701 if ' ' in word:
igor@54 702 word = re.split('\s+', word, 1)[0]
igor@54 703 deleted_words.append(word)
igor@54 704
igor@54 705 with codecs.open(voc_filename(), "a", "utf-8") as f:
igor@54 706 f.write("\n".join(deleted_words + ['']))
igor@54 707
igor@54 708 os.unlink(temp1)
igor@54 709 os.unlink(temp2)
igor@54 710
igor@54 711 os.unlink(original_text_tempfile)
igor@54 712
igor@37 713 (options, args) = parser.parse_args()
igor@38 714 if options.language:
igor@38 715 config['language'] = options.language
igor@37 716
igor@54 717 if options.pages:
igor@54 718 config['pages'] = options.pages
igor@54 719 else:
igor@54 720 config['pages'] = ""
igor@54 721
igor@54 722 if options.allowed_words:
igor@54 723 config['allowed_words'] = options.allowed_words
igor@54 724
igor@55 725 if options.show_range:
igor@55 726 config['show_range'] = options.show_range
igor@55 727
igor@54 728 if options.show_range_percentage:
igor@54 729 config['show_range_percentage'] = options.show_range_percentage
igor@54 730
igor@54 731 if options.non_interactive:
igor@54 732 config['non_interactive'] = True
igor@54 733
igor@54 734 if options.text_stats:
igor@54 735 config['text_stats'] = True
igor@54 736
igor@54 737 if options.compressed:
igor@54 738 config['compressed'] = True
igor@54 739
igor@54 740 if options.no_filter:
igor@54 741 config['no_filter'] = True
igor@54 742
igor@54 743 if options.two_words:
igor@54 744 config['two_words'] = True
igor@54 745
igor@54 746 if options.three_words:
igor@54 747 config['three_words'] = True
igor@54 748
igor@55 749 if options.no_words_grouping:
igor@55 750 config['no_words_grouping'] = True
igor@37 751
igor@55 752 filter_get_words_group_words_add_stat(args)
igor@55 753
igor@55 754 #if options.function:
igor@55 755 # function_names = {
igor@55 756 # 'get_words_group_words_add_stat': ,
igor@55 757 # }
igor@55 758 # if options.function in function_names:
igor@55 759 # function_names[options.function](args)
igor@55 760 # else:
igor@55 761 # error_message("Unkown function %s.\nAvailable functions:\n%s" % (
igor@55 762 # options.function, "".join([" "+x for x in sorted(function_names.keys())])))
igor@55 763 # sys.exit(1)
igor@55 764 #
igor@37 765
igor@37 766
igor@37 767
igor@38 768 #os.system("vim")
igor@37 769