#!/usr/bin/env python
# -*- coding: utf-8 -*-
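#
# new-words: builds a frequency list of the words in a text (file, URL or
# standard input), subtracts the words already recorded in the user's
# vocabulary (~/.new-words/<language>.txt), groups the remaining word forms
# by stem and prints the result.  In interactive mode the list is opened in
# vim; words deleted there are treated as known and appended to the
# vocabulary.
#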

from __future__ import with_statement
import codecs
import difflib
import logging
import os
import optparse
import re
import subprocess
import sys
import Stemmer
import tempfile
try:
    import psyco
    psyco.full()
except:
    pass

config = {
    'config_directory': os.environ['HOME'] + '/.new-words',
    'language': 'en',
}

logging.basicConfig(filename='/tmp/new-words-py.log', level=logging.DEBUG)

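# Normalizator reduces word forms to a common key: it first follows the
# "linked word" chains extracted from the notes file (see find_linked_words)
# and then applies a Snowball stemmer (via the Stemmer module, PyStemmer)
# for the configured language.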
class Normalizator:
    def __init__(self, language, linked_words={}):
        stemmer_algorithm = {
            'de' : 'german',
            'fr' : 'french',
            'en' : 'english',
            'es' : 'spanish',
            'ru' : 'russian',
            'it' : 'italian',
            'uk' : 'ukrainian',
        }
        self.stemmer = Stemmer.Stemmer(stemmer_algorithm[language])
        self.linked_words = linked_words

    def normalize(self, word):
        word_chain = []
        while word in self.linked_words and not word in word_chain:
            word_chain.append(word)
            word = self.linked_words[word]
        return self.stemmer.stemWord(word.lower())

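    # Used for the --compressed output: picks one representative word for a
    # group of (frequency, word) pairs that share the same normalized form.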
    def best_word_from_group(self, wordpairs_group):
        """Returns the word that is the most relevant to the wordpairs_group.

        At the moment: returns the word with minimal length"""

        def f(x, y):
            return difflib.SequenceMatcher(
                None,
                #(x[-2:] == 'en' and x[:-2].lower() or x.lower()),
                x.lower(),
                y.lower()).ratio()

        minimal_length = min(len(pair[1]) for pair in wordpairs_group)
        best_match = list(x[1] for x in sorted(
            (x for x in wordpairs_group if len(x[1]) == minimal_length),
            key=lambda x:x[0],
            reverse=True))[0]

        return best_match

        # NOTE: the early return above disables the dictionary-based
        # refinement below; it is kept for reference but is never executed.
        suggestions = self.dictionary_suggestions(best_match)
        if len(suggestions) == 1:
            return best_match

        verb = False
        corrected_best_match = best_match
        if best_match[-2:] == 'et':
            word = best_match[:-1]+"n"
            sugg = self.dictionary_suggestions(word)
            if len(sugg) == 1:
                return word
            suggestions += sugg
            corrected_best_match = word
            corrected_best_match = best_match[:-2]
            verb = True

        if best_match[-1] == 't':
            word = best_match[:-1]+"en"
            sugg = self.dictionary_suggestions(word)
            if len(sugg) == 1:
                return word
            suggestions += sugg
            corrected_best_match = best_match[:-1]
            verb = True

        if corrected_best_match[0].lower() == corrected_best_match[0]:
            suggestions = [ x for x in suggestions
                            if x[0].lower() == x[0] ]

        if suggestions == []:
            return best_match+"_"
        return best_match+" "+(" ".join(
            sorted(
                suggestions,
                key = lambda x: f(x, corrected_best_match),
                reverse = True
            )
        )
        )

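    # Runs the external "de-variants" helper (expected to be in $PATH) for
    # the given word and returns its output lines as unicode strings.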
    def dictionary_suggestions(self, word):
        return [
            x.decode('utf-8').rstrip('\n')
            for x
            in subprocess.Popen(
                ["de-variants", word],
                stdout=subprocess.PIPE
            ).stdout.readlines() ]


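# Command-line interface.  A typical invocation (example only) looks like
#   new-words.py -l de some-text.txt
# where -l selects the language and the positional argument is a file or URL;
# with no argument the text is read from standard input.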
parser = optparse.OptionParser()

parser.add_option(
    "-a", "--no-marks",
    help="don't add marks (and don't save marks added by user) [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="no_marks")

parser.add_option(
    "-c", "--compressed",
    help="show compressed wordlist: one word per group",
    action="store_true",
    dest="compressed")

parser.add_option(
    "-k", "--known-words",
    help="rank words that are similar to already known words higher (only for English)",
    action="store_true",
    dest="compressed")

parser.add_option(
    "-l", "--language",
    help="specify language of text",
    action="store",
    dest="language")

parser.add_option(
    "-f", "--allowed-words",
    help="file with list of allowed words (words that will be shown in the output)",
    action="store",
    dest="allowed_words")

parser.add_option(
    "-G", "--words-grouping",
    help="turn off word grouping",
    action="store_true",
    dest="no_words_grouping")

parser.add_option(
    "-X", "--function",
    help="filter through subsystem [INTERNAL]",
    action="store",
    dest="function")

parser.add_option(
    "-m", "--merge-tag",
    help="merge words tagged with specified tag into the main vocabulary [NOT IMPLEMENTED YET]",
    action="store",
    dest="merge_tag")

parser.add_option(
    "-M", "--merge-tagged",
    help="merge words tagged with ANY tag into the main vocabulary [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="merge_tagged")

parser.add_option(
    "-n", "--non-interactive",
    help="non-interactive mode (don't run vi)",
    action="store_true",
    dest="non_interactive")

parser.add_option(
    "-N", "--no-filter",
    help="switch off known words filtering",
    action="store_true",
    dest="no_filter")

parser.add_option(
    "-p", "--pages",
    help="work with specified pages only (pages = start-stop/total)",
    action="store",
    dest="pages")

parser.add_option(
    "-d", "--delete-tag",
    help="delete subvocabulary of specified tag",
    action="store",
    dest="delete_tag")

parser.add_option(
    "-r", "--show-range",
    help="show only the specified number of words",
    action="store",
    dest="show_range")

parser.add_option(
    "-R", "--show-range-percentage",
    help="show only words that cover specified percentage of the text, skip the rest",
    action="store",
    dest="show_range_percentage")

parser.add_option(
    "-s", "--text-stats",
    help="show the text statistics (percentage of known words and so on) and exit",
    action="store_true",
    dest="text_stats")

parser.add_option(
    "-S", "--voc-stats",
    help="show your vocabulary statistics (number of words and word groups) [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="voc_stats")

parser.add_option(
    "-t", "--tag",
    help="tag known words with tag",
    action="store",
    dest="tag")

parser.add_option(
    "-T", "--show-tags",
    help="show tags of known words",
    action="store_true",
    dest="show_tags")

parser.add_option(
    "-v", "--vocabulary-filename",
    help="use specified file as a vocabulary",
    action="store",
    dest="vocabulary_filename")

parser.add_option(
    "-2", "--two-words",
    help="find 2 words' sequences",
    action="store_true",
    dest="two_words")

parser.add_option(
    "-3", "--three-words",
    help="find 3 words' sequences",
    action="store_true",
    dest="three_words")

def readlines_from_file(filename):
    res = []
    with codecs.open(filename, "r", "utf-8") as f:
        for line in f.readlines():
            res += [line]
    return res

def readlines_from_url(url):
    return [x.decode('utf-8') for x in
        subprocess.Popen(
            "lynx -dump '{url}' | perl -p -e 's@http://[a-zA-Z&_.:/0-9%?=,#+()\[\]~-]*@@'".format(url=url),
            shell = True,
            stdout = subprocess.PIPE,
            stderr = subprocess.STDOUT
        ).communicate()[0].split('\n')
    ]

def readlines_from_stdin():
    return codecs.getreader("utf-8")(sys.stdin).readlines()

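# Tokenizer: splits a line on runs of non-word characters.  The lookahead
# keeps a run from starting at an apostrophe, so word-internal apostrophes
# ("don't") survive; re.UNICODE lets accented letters count as word
# characters.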
def words_from_line(line):
    line = line.rstrip('\n')
    #return re.split('(?:\s|[*\r,.:#@()+=<>$;"?!|\[\]^%&~{}«»–])+', line)
    #return re.split('[^a-zA-ZäöëüßÄËÖÜß]+', line)
    return re.compile("(?!['_])(?:\W)+", flags=re.UNICODE).split(line)

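# group_by controls n-gram counting: with 2 and/or 3 in the list, two- and
# three-word sequences (joined with "_") are counted alongside single words.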
def get_words(lines, group_by=[1]):
    """
    Returns a dict of the words found in lines:
    word => number of occurrences
    """
    result = {}
    (a, b, c) = ("", "", "")
    for line in lines:
        words = words_from_line(line)
        for word in words:
            if re.match('[0-9]*$', word):
                continue
            result.setdefault(word, 0)
            result[word] += 1
            if 2 in group_by and a != "" and b != "":
                w = "%s_%s" % (a,b)
                result.setdefault(w, 0)
                result[w] += 1
            if 3 in group_by and not "" in [a,b,c]:
                w = "%s_%s_%s" % (a,b,c)
                result.setdefault(w, 0)
                result[w] += 1
            (a,b,c) = (b, c, word)

    logging.debug(result)
    return result

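# The vocabulary of known words lives in <config_directory>/<language>.txt
# unless an explicit file is given with --vocabulary-filename.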
def voc_filename():
    if 'vocabulary_filename' in config:
        return config['vocabulary_filename']
    return "%s/%s.txt"%(config['config_directory'], config['language'])

def load_vocabulary():
    return get_words(readlines_from_file(voc_filename()))

def notes_filenames():
    return ["%s/notes-%s.txt"%(config['config_directory'], config['language'])]

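# Notes files (notes-<language>.txt) hold one "word<whitespace>note" entry
# per line; notes are shown next to the words in the interactive list.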
def load_notes(files):
    notes = {}
    for filename in files:
        with codecs.open(filename, "r", "utf-8") as f:
            for line in f.readlines():
                (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
                notes.setdefault(word, {})
                notes[word][filename] = note
    return notes

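# Appends the saved note (if any) as an extra column to each word line of
# the generated list; comment lines starting with '#' pass through untouched.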
def add_notes(lines, notes):
    notes_filename = notes_filenames()[0]
    result = []
    for line in lines:
        if line.startswith('#'):
            result += [line]
        else:
            match_object = re.search('^\s*\S+\s*(\S+)', line)
            if match_object:
                word = match_object.group(1)
                if word in notes:
                    if notes_filename in notes[word]:
                        line = line.rstrip('\n')
                        line = "%-30s %s\n" % (line, notes[word][notes_filename])
                        result += [line]
                    else:
                        result += [line]
                else:
                    result += [line]
    return result

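# Inverse of add_notes: strips the note column from the edited list (saving
# any edited notes back to the notes file) so the remaining lines can be
# compared with the original output.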
def remove_notes(lines, notes_group):
    notes_filename = notes_filenames()[0]
    notes = {}
    for k in notes_group.keys():
        if notes_filename in notes_group[k]:
            notes[k] = notes_group[k][notes_filename]

    result = []
    for line in lines:
        line = line.rstrip('\n')
        match_object = re.match('(\s+)(\S+)(\s+)(\S+)(\s+)(.*)', line)
        if match_object:
            result.append("".join([
                match_object.group(1),
                match_object.group(2),
                match_object.group(3),
                match_object.group(4),
                "\n"
            ]))
            notes[match_object.group(4)] = match_object.group(6)
        else:
            result.append(line+"\n")

    save_notes(notes_filename, notes)
    return result

def save_notes(filename, notes):
    lines = []
    saved_words = []
    with codecs.open(filename, "r", "utf-8") as f:
        for line in f.readlines():
            (word, note) = re.split('\s+', line.rstrip('\n'), maxsplit=1)
            if word in notes:
                line = "%-29s %s\n" % (word, notes[word])
                saved_words.append(word)
            lines.append(line)
    for word in [x for x in notes.keys() if not x in saved_words]:
        line = "%-29s %s\n" % (word, notes[word])
        lines.append(line)

    with codecs.open(filename, "w", "utf-8") as f:
        for line in lines:
            f.write(line)


def substract_dictionary(dict1, dict2):
    """
    returns dict1 - dict2
    """
    result = {}
    for (k,v) in dict1.items():
        if not k in dict2:
            result[k] = v
    return result

def dump_words(words, filename):
    with codecs.open(filename, "w+", "utf-8") as f:
        for word in words.keys():
            f.write(("%s\n"%word)*words[word])

def error_message(text):
    print text

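# Sums the frequencies of all word forms that normalize to the same stem;
# this weight drives the group ordering in the output.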
def find_wordgroups_weights(word_pairs, normalizator):
    weight = {}
    for (num, word) in word_pairs:
        normalized = normalizator.normalize(word)
        weight.setdefault(normalized, 0)
        weight[normalized] += num
    return weight

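# A note of the form "@mainword" links a word to another headword; linked
# words are then normalized to the headword they point to.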
def find_linked_words(notes):
    linked_words = {}
    for word in notes.keys():
        for note in notes[word].values():
            if "@" in note:
                result = re.search(r'\@(\S*)', note)
                if result:
                    main_word = result.group(1)
                    if main_word:
                        linked_words[word] = main_word
    return linked_words

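# Sort key for the word list: group weight first, then the normalized form
# (keeping members of a group together), then the word's own frequency.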
def compare_word_pairs(pair1, pair2, wgw, normalizator, linked_words):
    (num1, word1) = pair1
    (num2, word2) = pair2

    normalized_word1 = normalizator.normalize(word1)
    normalized_word2 = normalizator.normalize(word2)

    cmp_res = cmp(wgw[normalized_word1], wgw[normalized_word2])
    if cmp_res != 0:
        return cmp_res
    else:
        cmp_res = cmp(normalized_word1, normalized_word2)
        if cmp_res != 0:
            return cmp_res
        else:
            return cmp(int(num1), int(num2))


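# Renders the word list: a "# ..." stats header, "frequency word" lines and
# "# NN" level markers showing the running coverage percentage; with
# stats_only it returns just a two-line statistics table.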
def print_words_sorted(
        word_pairs,
        stats,
        normalizator,
        print_stats=True,
        stats_only=False,
        compressed_wordlist=False,
        show_range=0,
        show_range_percentage=0,
        ):
    result = []
    if stats_only:
        #codecs.getwriter("utf-8")(sys.stdout).write(
        result.append(
            " ".join([
                "%-10s" % x for x in [
                    "LANG",
                    "KNOWN%",
                    "UNKNOWN%",
                    "KNOWN",
                    "TOTAL",
                    "WPS",
                    "UWPS*10"
                ]]) + "\n")
        result.append(
            " ".join([
                "%(language)-10s",
                "%(percentage)-10.2f",
                "%(percentage_unknown)-10.2f",
                "%(total_known)-11d"
                "%(total)-11d"
                "%(wps)-11d"
                "%(uwps)-11d"
            ]) % stats + "\n")
        return "".join(result)

    if print_stats:
        result.append(
            "# %(language)s, %(percentage)-7.2f, <%(total_known)s/%(total)s>, <%(groups)s/%(words)s>\n" % stats)

    level_lines = range(int(float(stats['percentage']))/5*5+5,95,5)+range(90,102)
    known = int(stats['total_known'])
    total = int(stats['total'])
    current_level = 0
    old_normalized_word = None
    words_of_this_group = []
    printed_words = 0
    for word_pair in word_pairs:

        normalized_word = normalizator.normalize(word_pair[1])
        if old_normalized_word and old_normalized_word != normalized_word:
            if compressed_wordlist:
                compressed_word_pair = (
                    sum(x[0] for x in words_of_this_group),
                    normalizator.best_word_from_group(words_of_this_group)
                )
                result.append("%10s %s\n" % compressed_word_pair)
                printed_words += 1
            words_of_this_group = []

        old_normalized_word = normalized_word
        words_of_this_group.append(word_pair)

        if not compressed_wordlist:
            result.append("%10s %s\n" % word_pair)
            printed_words += 1


        known += word_pair[0]
        if 100.0*known/total >= level_lines[0]:
            current_level = level_lines[0]
            while 100.0*known/total > level_lines[0]:
                current_level = level_lines[0]
                level_lines = level_lines[1:]
            result.append("# %s\n" % current_level)

        if show_range >0 and printed_words >= show_range:
            break
        if show_range_percentage >0 and 100.0*known/total >= show_range_percentage:
            break

    return result

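# Examples: "3/10" -> (3, 4, 10), "2-5/10" -> (2, 5, 10),
# "2+3/10" -> (2, 5, 10) (start, start+delta).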
def parse_parts_description(parts_description):
    """
    Returns a triad (start, stop, step)
    based on the parts_description string:
    from-to/step
    from+delta/step
    """

    try:
        (a, step) = parts_description.split("/", 1)
        step = int(step)
        start = 0
        stop = 0
        if '-' in a:
            (start, stop) = a.split("-", 1)
            start = int(start)
            stop = int(stop)
        elif '+' in a:
            (start, stop) = a.split("+", 1)
            start = int(start)
            stop = start + int(stop)
        else:
            start = int(a)
            stop = start + 1
        return (start, stop, step)

    except:
        raise ValueError("Parts description must be in format: num[[+-]num]/num; this [%s] is incorrect" % parts_description)


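# Cuts the input into `step` equal parts and keeps the lines between parts
# `start` and `stop`; used by the -p/--pages option.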
def take_part(lines, part_description = None):
    if part_description == None or part_description == '':
        return lines
    (start, stop, step) = parse_parts_description(part_description)
    n = len(lines)
    part_size = (1.0*n) / step
    result = []
    for i in range(n):
        if i >= start * part_size and i <= stop * part_size:
            result += [lines[i]]
    return result

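# Main pipeline: read the text, count words, subtract the known vocabulary,
# group and sort the remainder, then either print it (non-interactive /
# stats mode) or open it in vim for marking.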
def filter_get_words_group_words_add_stat(args):
    vocabulary = load_vocabulary()
    notes = load_notes(notes_filenames())

    if len(args) > 0:
        if 'http://' in args[0]:
            input_lines = readlines_from_url(args[0])
        else:
            input_lines = readlines_from_file(args[0])
    else:
        input_lines = readlines_from_stdin()

    if len(input_lines) == 0:
        print >> sys.stderr, "Nothing to do, standard input is empty, exiting."
        sys.exit(1)

    lines = take_part(input_lines, config.get('pages', ''))

    (_, original_text_tempfile) = tempfile.mkstemp(prefix='new-word')
    with codecs.open(original_text_tempfile, "w", "utf-8") as f:
        f.write("".join(lines))

    group_by = [1]

    if 'two_words' in config:
        group_by.append(2)
    if 'three_words' in config:
        group_by.append(3)
    words = get_words(lines, group_by)
    stats_only = False
    if 'text_stats' in config:
        stats_only = True

    compressed_wordlist = False
    if 'compressed' in config:
        compressed_wordlist = True

    if 'show_range' in config:
        show_range = int(config['show_range'])
    else:
        show_range = 0

    if 'show_range_percentage' in config:
        show_range_percentage = int(config['show_range_percentage'])
    else:
        show_range_percentage = 0


    stats = {}
    stats['total'] = sum(words[x] for x in words.keys())
    if not 'no_filter' in config:
        words = substract_dictionary(words, vocabulary)

    stats['total_unknown'] = sum(words[x] for x in words.keys())
    stats['total_known'] = stats['total'] - stats['total_unknown']
    stats['percentage'] = 100.0*stats['total_known']/stats['total']
    stats['percentage_unknown'] = 100.0-100.0*stats['total_known']/stats['total']
    stats['groups'] = 0
    stats['words'] = len(words)
    stats['sentences'] = 0 #FIXME
    stats['wps'] = 0 #FIXME
    stats['uwps'] = 0 #FIXME
    stats['language'] = config['language']

    linked_words = find_linked_words(notes)
    normalizator = Normalizator(config['language'], linked_words)

    # filter words by allowed_words_filter
    if 'allowed_words' in config:
        allowed_words_filename = config['allowed_words']
        normalized_allowed_words = [
            normalizator.normalize(w.rstrip('\n'))
            for w in readlines_from_file(allowed_words_filename)
        ]

        result = {}
        for w, wn in words.iteritems():
            if normalizator.normalize(w) in normalized_allowed_words:
                result[w] = wn
        words = result

    words_with_freq = []
    for k in sorted(words.keys(), key=lambda k: words[k], reverse=True):
        words_with_freq.append((words[k], k))

    wgw = find_wordgroups_weights(words_with_freq, normalizator)
    if not 'no_words_grouping' in config or not config['no_words_grouping']:
        words_with_freq = sorted(
            words_with_freq,
            cmp=lambda x,y:compare_word_pairs(x,y, wgw, normalizator, linked_words),
            reverse=True)

    output = print_words_sorted(
        words_with_freq,
        stats,
        normalizator,
        stats_only=stats_only,
        compressed_wordlist=compressed_wordlist,
        show_range=show_range,
        show_range_percentage=show_range_percentage,
    )

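    # Interactive mode: the list is edited in vim; any word lines the user
    # deletes there are treated as known and appended to the vocabulary file.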
    if ('non_interactive' in config or 'text_stats' in config):
        codecs.getwriter("utf-8")(sys.stdout).write("".join(output))
    else:
        (_, temp1) = tempfile.mkstemp(prefix='new-word')
        (_, temp2) = tempfile.mkstemp(prefix='new-word')

        with codecs.open(temp1, "w", "utf-8") as f:
            f.write("".join(output))
        with codecs.open(temp2, "w", "utf-8") as f:
            f.write("".join(add_notes(output, notes)))

        os.putenv('ORIGINAL_TEXT', original_text_tempfile)
        os.system((
            "vim"
            " -c 'setlocal spell spelllang={language}'"
            " -c 'set keywordprg={language}'"
            " -c 'set iskeyword=@,48-57,/,.,-,_,+,,,#,$,%,~,=,48-255'"
            " {filename}"
            " < /dev/tty > /dev/tty"
            ).format(language=config['language'], filename=temp2))

        lines = remove_notes(readlines_from_file(temp2), notes)

        # compare lines_before and lines_after and return deleted words
        lines_before = output
        lines_after = lines
        deleted_words = []

        lines_after_set = set(lines_after)
        for line in lines_before:
            if line not in lines_after_set:
                line = line.strip()
                if ' ' in line:
                    word = re.split('\s+', line, 1)[1]
                    if ' ' in word:
                        word = re.split('\s+', word, 1)[0]
                    deleted_words.append(word)

        with codecs.open(voc_filename(), "a", "utf-8") as f:
            f.write("\n".join(deleted_words + ['']))

        os.unlink(temp1)
        os.unlink(temp2)

        os.unlink(original_text_tempfile)

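# Copy the parsed command-line options into the global config dictionary.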
(options, args) = parser.parse_args()
if options.language:
    config['language'] = options.language

if options.pages:
    config['pages'] = options.pages
else:
    config['pages'] = ""

if options.allowed_words:
    config['allowed_words'] = options.allowed_words

if options.show_range:
    config['show_range'] = options.show_range

if options.show_range_percentage:
    config['show_range_percentage'] = options.show_range_percentage

if options.non_interactive:
    config['non_interactive'] = True

if options.text_stats:
    config['text_stats'] = True

if options.compressed:
    config['compressed'] = True

if options.no_filter:
    config['no_filter'] = True

if options.two_words:
    config['two_words'] = True

if options.three_words:
    config['three_words'] = True

if options.no_words_grouping:
    config['no_words_grouping'] = True

filter_get_words_group_words_add_stat(args)

#if options.function:
#    function_names = {
#        'get_words_group_words_add_stat': ,
#    }
#    if options.function in function_names:
#        function_names[options.function](args)
#    else:
#        error_message("Unknown function %s.\nAvailable functions:\n%s" % (
#            options.function, "".join([" "+x for x in sorted(function_names.keys())])))
#    sys.exit(1)
#



#os.system("vim")