new-words
view new-words.py @ 54:e25de9ea9184
new-words.py is almost ready
author    Igor Chubin <igor@chub.in>
date      Tue Nov 01 20:19:18 2011 +0100 (2011-11-01)
parents   f583256b7ab1
children  2a1a25e61872
line source
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import with_statement
import codecs
import difflib
import logging
import os
import optparse
import re
import subprocess
import sys
import Stemmer
import tempfile
try:
    import psyco
    psyco.full()
except ImportError:
    pass

config = {
    'config_directory': os.environ['HOME'] + '/.new-words',
    'language': 'en',
}

logging.basicConfig(filename='/tmp/new-words-py.log', level=logging.DEBUG)

class Normalizator:
    def __init__(self, language, linked_words={}):
        stemmer_algorithm = {
            'de': 'german',
            'en': 'english',
            'es': 'spanish',
            'ru': 'russian',
            'it': 'italian',
            'uk': 'ukrainian',
        }
        self.stemmer = Stemmer.Stemmer(stemmer_algorithm[language])
        self.linked_words = linked_words

    def normalize(self, word):
        word_chain = []
        while word in self.linked_words and not word in word_chain:
            word_chain.append(word)
            word = self.linked_words[word]
        return self.stemmer.stemWord(word.lower())
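
    # Example (doctest-style sketch; assumes PyStemmer is installed and
    # uses a hypothetical linked_words mapping):
    #   >>> n = Normalizator('en', linked_words={'ran': 'run'})
    #   >>> n.normalize('ran')        # follows the link, then stems
    #   'run'
    #   >>> n.normalize('running')    # no link, stemmer only
    #   'run'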

    def best_word_from_group(self, wordpairs_group):
        """Returns the word that is the most relevant to the wordpairs_group.

        At the moment: returns the word with minimal length"""

        def f(x, y):
            return difflib.SequenceMatcher(
                None,
                #(x[-2:] == 'en' and x[:-2].lower() or x.lower()),
                x.lower(),
                y.lower()).ratio()

        minimal_length = min(len(pair[1]) for pair in wordpairs_group)
        best_match = list(x[1] for x in sorted(
            (x for x in wordpairs_group if len(x[1]) == minimal_length),
            key=lambda x: x[0],
            reverse=True))[0]

        return best_match
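
        # NOTE: the early return above makes the dictionary-based
        # disambiguation below unreachable; it is kept, apparently for
        # later use, together with the German-specific suffix heuristics.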

        suggestions = self.dictionary_suggestions(best_match)
        if len(suggestions) == 1:
            return best_match

        verb = False
        corrected_best_match = best_match
        if best_match[-2:] == 'et':
            word = best_match[:-1] + "n"
            sugg = self.dictionary_suggestions(word)
            if len(sugg) == 1:
                return word
            suggestions += sugg
            corrected_best_match = word
            corrected_best_match = best_match[:-2]
            verb = True

        if best_match[-1] == 't':
            word = best_match[:-1] + "en"
            sugg = self.dictionary_suggestions(word)
            if len(sugg) == 1:
                return word
            suggestions += sugg
            corrected_best_match = best_match[:-1]
            verb = True

        if corrected_best_match[0].lower() == corrected_best_match[0]:
            suggestions = [x for x in suggestions
                           if x[0].lower() == x[0]]

        if suggestions == []:
            return best_match + "_"
        return best_match + " " + (" ".join(
            sorted(
                suggestions,
                key=lambda x: f(x, corrected_best_match),
                reverse=True
            )
        ))

    def dictionary_suggestions(self, word):
        return [
            x.decode('utf-8').rstrip('\n')
            for x
            in subprocess.Popen(
                ["de-variants", word],
                stdout=subprocess.PIPE
            ).stdout.readlines()]
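
# Note: dictionary_suggestions() shells out to an external "de-variants"
# helper, which must be available on PATH and is expected to print one
# suggested dictionary form per line for the given word.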

parser = optparse.OptionParser()

parser.add_option(
    "-a", "--no-marks",
    help="don't add marks (and don't save marks added by user)",
    action="store_true",
    dest="no_marks")

parser.add_option(
    "-c", "--compressed",
    help="show compressed wordlist: one word per group",
    action="store_true",
    dest="compressed")

parser.add_option(
    "-k", "--known-words",
    help="rank words that are similar to known words higher (English only)",
    action="store_true",
    dest="known_words")

parser.add_option(
    "-l", "--language",
    help="specify language of text",
    action="store",
    dest="language")

parser.add_option(
    "-f", "--allowed-words",
    help="file with list of allowed words (words that will be shown in the output)",
    action="store",
    dest="allowed_words")

parser.add_option(
    "-X", "--function",
    help="filter through subsystem [INTERNAL]",
    action="store",
    dest="function")

parser.add_option(
    "-m", "--merge-tag",
    help="merge words tagged with the specified tag into the main vocabulary",
    action="store",
    dest="merge_tag")

parser.add_option(
    "-M", "--merge-tagged",
    help="merge words tagged with ANY tag into the main vocabulary",
    action="store_true",
    dest="merge_tagged")

parser.add_option(
    "-n", "--non-interactive",
    help="non-interactive mode (don't run vi)",
    action="store_true",
    dest="non_interactive")

parser.add_option(
    "-N", "--no-filter",
    help="switch off known words filtering",
    action="store_true",
    dest="no_filter")

parser.add_option(
    "-p", "--pages",
    help="work with specified pages only (pages = start-stop/total)",
    action="store",
    dest="pages")

parser.add_option(
    "-d", "--delete-tag",
    help="delete subvocabulary of the specified tag",
    action="store",
    dest="delete_tag")

parser.add_option(
    "-R", "--show-range-percentage",
    help="show only words that cover the specified percentage of the text, skip the rest",
    action="store",
    dest="show_range_percentage")

parser.add_option(
    "-s", "--text-stats",
    help="show the text statistics (percentage of known words and so on) and exit",
    action="store_true",
    dest="text_stats")

parser.add_option(
    "-S", "--voc-stats",
    help="show your vocabulary statistics (number of words and word groups)",
    action="store_true",
    dest="voc_stats")

parser.add_option(
    "-t", "--tag",
    help="tag known words with the specified tag",
    action="store",
    dest="tag")

parser.add_option(
    "-T", "--show-tags",
    help="show the list of used tags",
    action="store_true",
    dest="show_tags")

parser.add_option(
    "-2", "--two-words",
    help="find two-word sequences",
    action="store_true",
    dest="two_words")

parser.add_option(
    "-3", "--three-words",
    help="find three-word sequences",
    action="store_true",
    dest="three_words")

def readlines_from_file(filename):
    res = []
    with codecs.open(filename, "r", "utf-8") as f:
        for line in f.readlines():
            res += [line]
    return res

def readlines_from_url(url):
    return [x.decode('utf-8') for x in
        subprocess.Popen(
            "lynx -dump '{url}' | perl -p -e 's@http://[a-zA-Z&_.:/0-9%?=,#+()\[\]~-]*@@'".format(url=url),
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        ).communicate()[0].split('\n')
    ]
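
# Note: readlines_from_url() relies on external lynx and perl binaries;
# the perl one-liner strips http:// URLs from the rendered page dump.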

def readlines_from_stdin():
    return codecs.getreader("utf-8")(sys.stdin).readlines()

def words_from_line(line):
    line = line.rstrip('\n')
    #return re.split('(?:\s|[*\r,.:#@()+=<>$;"?!|\[\]^%&~{}«»–])+', line)
    #return re.split('[^a-zA-ZäöëüßÄËÖÜß]+', line)
    return re.compile("(?!['_])(?:\W)+", flags=re.UNICODE).split(line)
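
# Example (doctest-style sketch; empty strings may appear around separators
# and are filtered out later by get_words):
#   >>> words_from_line(u"Hello, world! It's a test.\n")
#   [u'Hello', u'world', u"It's", u'a', u'test', u'']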

def get_words(lines, group_by=[1]):
    """
    Returns a dictionary of the words in the given lines:
    word => number of occurrences
    """
    result = {}
    (a, b, c) = ("", "", "")
    for line in lines:
        words = words_from_line(line)
        for word in words:
            if re.match('[0-9]*$', word):
                continue
            result.setdefault(word, 0)
            result[word] += 1
            if 2 in group_by and a != "" and b != "":
                w = "%s_%s" % (a, b)
                result.setdefault(w, 0)
                result[w] += 1
            if 3 in group_by and not "" in [a, b, c]:
                w = "%s_%s_%s" % (a, b, c)
                result.setdefault(w, 0)
                result[w] += 1
            (a, b, c) = (b, c, word)

    logging.debug(result)
    return result
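
# Example (doctest-style sketch; purely numeric tokens are skipped):
#   >>> sorted(get_words([u'a rose is a rose\n']).items())
#   [(u'a', 2), (u'is', 1), (u'rose', 2)]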

def voc_filename():
    return "%s/%s.txt" % (config['config_directory'], config['language'])

def load_vocabulary():
    return get_words(readlines_from_file(voc_filename()))

def notes_filenames():
    return ["%s/notes-%s.txt" % (config['config_directory'], config['language'])]

def load_notes(files):
    notes = {}
    for filename in files:
        with codecs.open(filename, "r", "utf-8") as f:
            for line in f.readlines():
                (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
                notes.setdefault(word, {})
                notes[word][filename] = note
    return notes
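
# Notes file format (one entry per line): a word, whitespace, then a
# free-form note.  A note may link a word to its main form with "@",
# e.g. (hypothetical German entry):
#   geht                          @gehen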

def add_notes(lines, notes):
    notes_filename = notes_filenames()[0]
    result = []
    for line in lines:
        if line.startswith('#'):
            result += [line]
        else:
            match_object = re.search('^\s*\S+\s*(\S+)', line)
            if match_object:
                word = match_object.group(1)
                if word in notes:
                    if notes_filename in notes[word]:
                        line = line.rstrip('\n')
                        line = "%-30s %s\n" % (line, notes[word][notes_filename])
                        result += [line]
                    else:
                        result += [line]
                else:
                    result += [line]
    return result

def remove_notes(lines, notes_group):
    notes_filename = notes_filenames()[0]
    notes = {}
    for k in notes_group.keys():
        if notes_filename in notes_group[k]:
            notes[k] = notes_group[k][notes_filename]

    result = []
    for line in lines:
        line = line.rstrip('\n')
        match_object = re.match('(\s+)(\S+)(\s+)(\S+)(\s+)(.*)', line)
        if match_object:
            result.append("".join([
                match_object.group(1),
                match_object.group(2),
                match_object.group(3),
                match_object.group(4),
                "\n"
            ]))
            notes[match_object.group(4)] = match_object.group(6)
        else:
            result.append(line + "\n")

    save_notes(notes_filename, notes)
    return result

def save_notes(filename, notes):
    lines = []
    saved_words = []
    with codecs.open(filename, "r", "utf-8") as f:
        for line in f.readlines():
            (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
            if word in notes:
                line = "%-29s %s\n" % (word, notes[word])
                saved_words.append(word)
            lines.append(line)
    for word in [x for x in notes.keys() if not x in saved_words]:
        line = "%-29s %s\n" % (word, notes[word])
        lines.append(line)

    with codecs.open(filename, "w", "utf-8") as f:
        for line in lines:
            f.write(line)

def subtract_dictionary(dict1, dict2):
    """
    returns dict1 - dict2
    """
    result = {}
    for (k, v) in dict1.items():
        if not k in dict2:
            result[k] = v
    return result
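
# Example (doctest-style sketch):
#   >>> subtract_dictionary({'a': 1, 'b': 2}, {'b': 0})
#   {'a': 1}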

def dump_words(words, filename):
    with codecs.open(filename, "w+", "utf-8") as f:
        for word in words.keys():
            f.write(("%s\n" % word) * words[word])

def error_message(text):
    print >> sys.stderr, text

def find_wordgroups_weights(word_pairs, normalizator):
    weight = {}
    for (num, word) in word_pairs:
        normalized = normalizator.normalize(word)
        weight.setdefault(normalized, 0)
        weight[normalized] += num
    return weight

def find_linked_words(notes):
    linked_words = {}
    for word in notes.keys():
        for note in notes[word].values():
            if "@" in note:
                result = re.search(r'\@(\S*)', note)
                if result:
                    main_word = result.group(1)
                    if main_word:
                        linked_words[word] = main_word
    return linked_words
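
# Example (hypothetical notes dictionary):
#   >>> find_linked_words({u'geht': {'notes-de.txt': u'@gehen'}})
#   {u'geht': u'gehen'}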

def compare_word_pairs(pair1, pair2, wgw, normalizator, linked_words):
    (num1, word1) = pair1
    (num2, word2) = pair2

    normalized_word1 = normalizator.normalize(word1)
    normalized_word2 = normalizator.normalize(word2)

    cmp_res = cmp(wgw[normalized_word1], wgw[normalized_word2])
    if cmp_res != 0:
        return cmp_res
    else:
        cmp_res = cmp(normalized_word1, normalized_word2)
        if cmp_res != 0:
            return cmp_res
        else:
            return cmp(int(num1), int(num2))

def print_words_sorted(
        word_pairs,
        stats,
        normalizator,
        print_stats=True,
        stats_only=False,
        compressed_wordlist=False,
        show_range=0,
        show_range_percentage=0,
        ):
    result = []
    if stats_only:
        #codecs.getwriter("utf-8")(sys.stdout).write(
        result.append(
            " ".join([
                "%-10s" % x for x in [
                    "LANG",
                    "KNOWN%",
                    "UNKNOWN%",
                    "KNOWN",
                    "TOTAL",
                    "WPS",
                    "UWPS*10"
                ]]) + "\n")
        result.append(
            " ".join([
                "%(language)-10s",
                "%(percentage)-10.2f",
                "%(percentage_unknown)-10.2f",
                "%(total_known)-11d"
                "%(total)-11d"
                "%(wps)-11d"
                "%(uwps)-11d"
            ]) % stats + "\n")
        return "".join(result)

    if print_stats:
        result.append(
            "# %(language)s, %(percentage)-7.2f, <%(total_known)s/%(total)s>, <%(groups)s/%(words)s>\n" % stats)

    level_lines = range(int(float(stats['percentage']))/5*5+5, 95, 5) + range(90, 102)
    known = int(stats['total_known'])
    total = int(stats['total'])
    current_level = 0
    old_normalized_word = None
    words_of_this_group = []
    printed_words = 0
    for word_pair in word_pairs:

        normalized_word = normalizator.normalize(word_pair[1])
        if old_normalized_word and old_normalized_word != normalized_word:
            if compressed_wordlist:
                compressed_word_pair = (
                    sum(x[0] for x in words_of_this_group),
                    normalizator.best_word_from_group(words_of_this_group)
                )
                result.append("%10s %s\n" % compressed_word_pair)
                printed_words += 1
            words_of_this_group = []

        old_normalized_word = normalized_word
        words_of_this_group.append(word_pair)

        if not compressed_wordlist:
            result.append("%10s %s\n" % word_pair)
            printed_words += 1

        known += word_pair[0]
        if 100.0*known/total >= level_lines[0]:
            current_level = level_lines[0]
            while 100.0*known/total > level_lines[0]:
                current_level = level_lines[0]
                level_lines = level_lines[1:]
            result.append("# %s\n" % current_level)

        if show_range > 0 and printed_words >= show_range:
            break
        if show_range_percentage > 0 and 100.0*known/total >= show_range_percentage:
            break

    return result
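
# Output format produced above: one "<frequency> <word>" pair per line,
# most frequent first, interleaved with "# <level>" marker lines showing
# the cumulative text coverage (in percent) reached at that point.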

def parse_parts_description(parts_description):
    """
    Returns triad (start, stop, step)
    based on the parts_description string:
    from-to/step
    from+delta/step
    """

    try:
        (a, step) = parts_description.split("/", 1)
        step = int(step)
        start = 0
        stop = 0
        if '-' in a:
            (start, stop) = a.split("-", 1)
            start = int(start)
            stop = int(stop)
        elif '+' in a:
            (start, stop) = a.split("+", 1)
            start = int(start)
            stop = start + int(stop)
        else:
            start = int(a)
            stop = start + 1
        return (start, stop, step)

    except ValueError:
        raise ValueError(
            "Parts description must be in format: num[[+-]num]/num; this [%s] is incorrect" % parts_description)
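
# Examples (doctest-style sketch):
#   >>> parse_parts_description("4/10")
#   (4, 5, 10)
#   >>> parse_parts_description("1-3/10")
#   (1, 3, 10)
#   >>> parse_parts_description("2+3/10")   # from+delta form
#   (2, 5, 10)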

def take_part(lines, part_description=None):
    if not part_description:
        return lines
    (start, stop, step) = parse_parts_description(part_description)
    n = len(lines)
    part_size = (1.0 * n) / step
    result = []
    for i in range(n):
        if i >= start * part_size and i <= stop * part_size:
            result += [lines[i]]
    return result
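
# Example: with 100 lines and part_description "0-1/10", part_size is 10.0,
# so take_part() keeps the lines with indices 0..10 (roughly the first tenth).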

def filter_get_words_group_words_add_stat(args):
    vocabulary = load_vocabulary()
    notes = load_notes(notes_filenames())

    if len(args) > 0:
        if 'http://' in args[0]:
            input_lines = readlines_from_url(args[0])
        else:
            input_lines = readlines_from_file(args[0])
    else:
        input_lines = readlines_from_stdin()

    if len(input_lines) == 0:
        print >> sys.stderr, "Nothing to do, standard input is empty, exiting."
        sys.exit(1)

    lines = take_part(input_lines, config.get('pages', ''))

    (_, original_text_tempfile) = tempfile.mkstemp(prefix='new-word')
    with codecs.open(original_text_tempfile, "w", "utf-8") as f:
        f.write("".join(lines))

    group_by = [1]

    if 'two_words' in config:
        group_by.append(2)
    if 'three_words' in config:
        group_by.append(3)
    words = get_words(lines, group_by)
    stats_only = False
    if 'text_stats' in config:
        stats_only = True

    compressed_wordlist = False
    if 'compressed' in config:
        compressed_wordlist = True

    show_range = os.environ.get('SHOW_RANGE', '')
    if show_range != '':
        show_range = int(show_range)
    else:
        show_range = 0

    if 'show_range_percentage' in config:
        show_range_percentage = int(config['show_range_percentage'])
    else:
        show_range_percentage = 0

    stats = {}
    stats['total'] = sum(words[x] for x in words.keys())
    if not 'no_filter' in config:
        words = subtract_dictionary(words, vocabulary)

    stats['total_unknown'] = sum(words[x] for x in words.keys())
    stats['total_known'] = stats['total'] - stats['total_unknown']
    stats['percentage'] = 100.0 * stats['total_known'] / stats['total']
    stats['percentage_unknown'] = 100.0 - 100.0 * stats['total_known'] / stats['total']
    stats['groups'] = 0
    stats['words'] = len(words)
    stats['sentences'] = 0  # FIXME
    stats['wps'] = 0  # FIXME
    stats['uwps'] = 0  # FIXME
    stats['language'] = config['language']

    linked_words = find_linked_words(notes)
    normalizator = Normalizator(config['language'], linked_words)

    # filter words by allowed_words_filter
    if 'allowed_words' in config:
        allowed_words_filename = config['allowed_words']
        normalized_allowed_words = [
            normalizator.normalize(w.rstrip('\n'))
            for w in readlines_from_file(allowed_words_filename)
        ]

        result = {}
        for w, wn in words.iteritems():
            if normalizator.normalize(w) in normalized_allowed_words:
                result[w] = wn
        words = result

    words_with_freq = []
    for k in sorted(words.keys(), key=lambda k: words[k], reverse=True):
        words_with_freq.append((words[k], k))

    wgw = find_wordgroups_weights(words_with_freq, normalizator)
    if 'WORDS_GROUPING' in os.environ and os.environ['WORDS_GROUPING'] == 'YES':
        words_with_freq = sorted(
            words_with_freq,
            cmp=lambda x, y: compare_word_pairs(x, y, wgw, normalizator, linked_words),
            reverse=True)

    output = print_words_sorted(
        words_with_freq,
        stats,
        normalizator,
        stats_only=stats_only,
        compressed_wordlist=compressed_wordlist,
        show_range=show_range,
        show_range_percentage=show_range_percentage,
    )

    if 'non_interactive' in config or 'text_stats' in config:
        codecs.getwriter("utf-8")(sys.stdout).write("".join(output))
    else:
        (_, temp1) = tempfile.mkstemp(prefix='new-word')
        (_, temp2) = tempfile.mkstemp(prefix='new-word')

        with codecs.open(temp1, "w", "utf-8") as f:
            f.write("".join(output))
        with codecs.open(temp2, "w", "utf-8") as f:
            f.write("".join(add_notes(output, notes)))

        os.putenv('ORIGINAL_TEXT', original_text_tempfile)
        os.system((
            "vim"
            " -c 'setlocal spell spelllang={language}'"
            " -c 'set keywordprg={language}'"
            " -c 'set iskeyword=@,48-57,/,.,-,_,+,,,#,$,%,~,=,48-255'"
            " {filename}"
            " < /dev/tty > /dev/tty"
        ).format(language=config['language'], filename=temp2))

        lines = remove_notes(readlines_from_file(temp2), notes)

        # compare lines_before and lines_after and return deleted words
        lines_before = output
        lines_after = lines
        deleted_words = []

        for line in lines_before:
            if line not in lines_after:
                line = line.strip()
                if ' ' in line:
                    word = re.split('\s+', line, 1)[1]
                    if ' ' in word:
                        word = re.split('\s+', word, 1)[0]
                    deleted_words.append(word)

        with codecs.open(voc_filename(), "a", "utf-8") as f:
            f.write("\n".join(deleted_words + ['']))

        os.unlink(temp1)
        os.unlink(temp2)

    os.unlink(original_text_tempfile)

(options, args) = parser.parse_args()
if options.language:
    config['language'] = options.language

if options.pages:
    config['pages'] = options.pages
else:
    config['pages'] = ""

if options.allowed_words:
    config['allowed_words'] = options.allowed_words

if options.show_range_percentage:
    config['show_range_percentage'] = options.show_range_percentage

if options.non_interactive:
    config['non_interactive'] = True

if options.text_stats:
    config['text_stats'] = True

if options.compressed:
    config['compressed'] = True

if options.no_filter:
    config['no_filter'] = True

if options.two_words:
    config['two_words'] = True

if options.three_words:
    config['three_words'] = True

if options.function:
    function_names = {
        'get_words_group_words_add_stat': filter_get_words_group_words_add_stat,
    }
    if options.function in function_names:
        function_names[options.function](args)
    else:
        error_message("Unknown function %s.\nAvailable functions:\n%s" % (
            options.function, "".join([" " + x for x in sorted(function_names.keys())])))
        sys.exit(1)

#os.system("vim")
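
# Usage sketch (assumption: the script is normally driven through the -X
# option by a wrapper script; the flag is marked [INTERNAL] above):
#   new-words.py -X get_words_group_words_add_stat -l en -n text.txt
# writes the frequency list of unknown words in text.txt to stdout; without
# -n the list is opened in vim, where deleting a line marks the word as
# known and appends it to the vocabulary file on exit.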