rev |
line source |
igor@37
|
1 #!/usr/bin/env python
|
igor@38
|
2 # -*- coding: utf-8 -*-
|
igor@37
|
3
|
igor@40
|
4 from __future__ import with_statement
|
igor@38
|
5 import codecs
|
igor@49
|
6 import difflib
|
igor@38
|
7 import logging
|
igor@38
|
8 import os
|
igor@37
|
9 import optparse
|
igor@38
|
10 import re
|
igor@38
|
11 import subprocess
|
igor@38
|
12 import sys
|
igor@38
|
13 import Stemmer
|
igor@54
|
14 import tempfile
|
igor@42
|
# Optional psyco JIT acceleration: a best-effort speed-up, silently skipped
# when psyco is not installed or fails to initialise.
try:
    import psyco
    psyco.full()
except Exception:
    # NOTE: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed during startup.
    pass
|
igor@38
|
20
|
igor@38
|
# Global runtime configuration.  `language` selects which vocabulary/notes
# files are used; `config_directory` is where they live.
config = {
    # os.path.expanduser is robust when $HOME is unset; the original
    # `os.environ['HOME']` raised KeyError in that case.
    'config_directory': os.path.join(os.path.expanduser('~'), '.new-words'),
    'language': 'en',
}

# Debug log for the whole script.
logging.basicConfig(filename='/tmp/new-words-py.log', level=logging.DEBUG)
|
igor@38
|
27
|
igor@38
|
class Normalizator:
    """Normalize words of a given language.

    Uses a Snowball stemmer (PyStemmer's ``Stemmer`` module) for the
    language when available, and falls back to plain lowercasing when the
    language is unknown or the stemmer library cannot be used.
    """

    # ISO 639-1 language code -> Snowball stemmer algorithm name.
    _STEMMER_ALGORITHM = {
        'de': 'german',
        'fr': 'french',
        'en': 'english',
        'es': 'spanish',
        'ru': 'russian',
        'it': 'italian',
        'uk': 'ukrainian',
    }

    def __init__(self, language, linked_words=None):
        """language: ISO 639-1 code; linked_words: optional word -> word
        mapping followed by normalize() before stemming."""
        try:
            self.stemmer = Stemmer.Stemmer(self._STEMMER_ALGORITHM[language])
        except Exception:
            # Unknown language code or stemmer library unavailable:
            # degrade gracefully to lowercasing only.
            self.stemmer = None
        # NOTE: original used a mutable default argument ({}); a None
        # sentinel avoids accidental shared state between instances.
        self.linked_words = linked_words if linked_words is not None else {}

    def normalize(self, word):
        """Follow the linked-words chain for `word`, then stem (or just
        lowercase) the final word of the chain."""
        word_chain = []
        # word_chain guards against cycles in linked_words.
        while word in self.linked_words and word not in word_chain:
            word_chain.append(word)
            word = self.linked_words[word]
        if self.stemmer:
            return self.stemmer.stemWord(word.lower())
        return word.lower()

    def best_word_from_group(self, wordpairs_group):
        """Returns the word that is the most relevant to the wordpairs_group.

        At the moment: among the shortest words of the group (pairs are
        (number, word)), returns the one with the largest first element.

        NOTE(review): the original contained a large unreachable section
        after the return (German dictionary-suggestion heuristics using a
        local difflib helper); it has been removed as dead code.
        """
        minimal_length = min(len(pair[1]) for pair in wordpairs_group)
        shortest = [pair for pair in wordpairs_group
                    if len(pair[1]) == minimal_length]
        shortest.sort(key=lambda pair: pair[0], reverse=True)
        return shortest[0][1]

    def dictionary_suggestions(self, word):
        """Return dictionary variants of `word` as a list of unicode
        strings, produced by the external `de-variants` helper."""
        process = subprocess.Popen(
            ["de-variants", word],
            stdout=subprocess.PIPE)
        return [line.decode('utf-8').rstrip('\n')
                for line in process.stdout.readlines()]
|
igor@49
|
123
|
igor@49
|
124
|
igor@37
|
# Command-line interface.  Several options are declared but still marked
# [NOT IMPLEMENTED YET] in their help text.
parser = optparse.OptionParser()

parser.add_option(
    "-a", "--no-marks",
    help="don't add marks (and don't save marks added by user) [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="no_marks")

parser.add_option(
    "-c", "--compressed",
    help="show compressed wordlist: one word per group",
    action="store_true",
    dest="compressed")

# FIXME(review): dest is "compressed", the same as -c above — almost
# certainly a copy-paste slip (expected dest="known_words").  Left
# unchanged because unseen downstream code may rely on it.
parser.add_option(
    "-k", "--known-words",
    help="put higher words that are similar to the known words (only for English)",
    action="store_true",
    dest="compressed")

parser.add_option(
    "-l", "--language",
    help="specify language of text",
    action="store",
    dest="language")

parser.add_option(
    "-f", "--allowed-words",
    help="file with list of allowed words (words that will be shown in the output)",
    action="store",
    dest="allowed_words")

parser.add_option(
    "-G", "--words-grouping",
    help="turn off word grouping",
    action="store_true",
    dest="no_words_grouping")

parser.add_option(
    "-X", "--function",
    help="filter through subsystem [INTERNAL]",
    action="store",
    dest="function")

parser.add_option(
    "-m", "--merge-tag",
    help="merge words tagged with specified tag into the main vocabulary [NOT IMPLEMENTED YET]",
    action="store",
    dest="merge_tag")

parser.add_option(
    "-M", "--merge-tagged",
    help="merge words tagged with ANY tag into the main vocabulary [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="merge_tagged")

parser.add_option(
    "-n", "--non-interactive",
    help="non-interactive mode (don't run vi)",
    action="store_true",
    dest="non_interactive")

parser.add_option(
    "-N", "--no-filter",
    help="switch off known words filtering",
    action="store_true",
    dest="no_filter")

parser.add_option(
    "-p", "--pages",
    help="work with specified pages only (pages = start-stop/total )",
    action="store",
    dest="pages")

parser.add_option(
    "-d", "--delete-tag",
    help="delete subvocabulary of specified tag",
    action="store",
    dest="delete_tag")

# Help text fixed: original read "show only words specified number of words".
parser.add_option(
    "-r", "--show-range",
    help="show only the specified number of words",
    action="store",
    dest="show_range")

parser.add_option(
    "-R", "--show-range-percentage",
    help="show only words that cover specified percentage of the text, skip the rest",
    action="store",
    dest="show_range_percentage")

parser.add_option(
    "-s", "--text-stats",
    help="show the text statistics (percentage of known words and so on) and exit",
    action="store_true",
    dest="text_stats")

parser.add_option(
    "-S", "--voc-stats",
    help="show your vocabulary statistics (number of words and word groups) [NOT IMPLEMENTED YET]",
    action="store_true",
    dest="voc_stats")

parser.add_option(
    "-t", "--tag",
    help="tag known words with tag",
    action="store",
    dest="tag")

# Help text fixed: original duplicated the -t/--tag help string.
parser.add_option(
    "-T", "--show-tags",
    help="show tags",
    action="store_true",
    dest="show_tags")

parser.add_option(
    "-v", "--vocabulary-filename",
    help="use specified file as a vocabulary",
    action="store",
    dest="vocabulary_filename")

parser.add_option(
    "-w", "--web",
    help="Web browser version",
    action="store_true",
    dest="web")

parser.add_option(
    "-2", "--two-words",
    help="find 2 words' sequences",
    action="store_true",
    dest="two_words")

parser.add_option(
    "-3", "--three-words",
    help="find 3 words' sequences",
    action="store_true",
    dest="three_words")
|
igor@37
|
264
|
igor@38
|
def readlines_from_file(filename):
    """Return the lines of a UTF-8 encoded text file as a list.

    Lines keep their trailing newline characters (file.readlines()
    semantics).
    """
    with codecs.open(filename, "r", "utf-8") as f:
        # readlines() already produces the list; the original copied it
        # element by element into a fresh list for no benefit.
        return f.readlines()
|
igor@38
|
271
|
igor@54
|
def readlines_from_url(url):
    # Fetch a URL, render it to plain text with lynx, strip bare
    # http:// links with a perl one-liner, and return the output as a
    # list of unicode strings (split on '\n', so lines carry no newline).
    #
    # NOTE(review): `url` is interpolated directly into a shell command
    # with shell=True — a URL containing a single quote can inject shell
    # code.  Consider pipes.quote/shlex.quote, or shell=False with an
    # explicit pipeline.
    # NOTE(review): stderr is merged into stdout, so lynx error messages
    # end up in the returned lines as well — presumably intentional
    # best-effort behaviour; confirm.
    return [x.decode('utf-8') for x in
        subprocess.Popen(
            "lynx -dump '{url}' | perl -p -e 's@http://[a-zA-Z&_.:/0-9%?=,#+()\[\]~-]*@@'".format(url=url),
            shell = True,
            stdout = subprocess.PIPE,
            stderr = subprocess.STDOUT
        ).communicate()[0].split('\n')
    ]
|
igor@54
|
281
|
igor@38
|
def readlines_from_stdin():
    """Read the whole of standard input, decoded as UTF-8, as a list of
    lines."""
    utf8_reader = codecs.getreader("utf-8")
    return utf8_reader(sys.stdin).readlines()
|
igor@38
|
284
|
igor@38
|
# Word splitter, compiled once at import time instead of on every call.
# Splits on runs of non-word characters, except that a separator run may
# not begin at an apostrophe (keeps intra-word apostrophes: "don't").
_WORD_SEPARATOR_RE = re.compile(r"(?!['_])(?:\W)+", flags=re.UNICODE)

def words_from_line(line):
    """Split one text line into words, dropping the trailing newline.

    Leading/trailing separators produce empty strings in the result,
    exactly as the original inline re.split did.
    """
    # (Two older, commented-out splitting regexes were removed as dead code.)
    return _WORD_SEPARATOR_RE.split(line.rstrip('\n'))
|
igor@38
|
290
|
igor@44
|
def get_words(lines, group_by=[1]):
    """
    Returns hash of words in a file
    word => number

    group_by selects which n-gram sizes to count: 1-grams are always
    counted; 2 adds pairs joined as "a_b"; 3 adds triples "a_b_c".
    NOTE(review): mutable default argument — harmless here because
    group_by is only read, never mutated.
    """
    result = {}
    # Sliding window of the three most recently seen words ("" = unset).
    (a, b, c) = ("", "", "")
    for line in lines:
        words = words_from_line(line)
        for word in words:
            # Skip purely numeric tokens; '[0-9]*$' also matches the
            # empty strings produced by words_from_line at separators.
            if re.match('[0-9]*$', word):
                continue
            result.setdefault(word, 0)
            result[word] += 1
            if 2 in group_by and a != "" and b != "":
                # NOTE(review): the window is shifted only after these
                # checks, so here a and b are the words seen three and two
                # positions before the current `word`; pair/triple counts
                # therefore lag the current word — confirm this is intended.
                w = "%s_%s" % (a,b)
                result.setdefault(w, 0)
                result[w] += 1
            if 3 in group_by and not "" in [a,b,c]:
                w = "%s_%s_%s" % (a,b,c)
                result.setdefault(w, 0)
                result[w] += 1
            (a,b,c) = (b, c, word)

    logging.debug(result)
    return result
|
igor@38
|
317
|
igor@54
|
def voc_filename():
    """Path of the vocabulary file: the explicit override from the config
    when present, otherwise the per-language default file inside the
    config directory."""
    try:
        return config['vocabulary_filename']
    except KeyError:
        return "%s/%s.txt"%(config['config_directory'], config['language'])
|
igor@54
|
322
|
igor@38
|
def load_vocabulary():
    """Word-frequency dict built from the user's vocabulary file."""
    vocabulary_lines = readlines_from_file(voc_filename())
    return get_words(vocabulary_lines)
|
igor@38
|
325
|
igor@38
|
def notes_filenames():
    """List of note files for the current language (currently just one)."""
    filename = "%s/notes-%s.txt"%(config['config_directory'], config['language'])
    return [filename]
|
igor@38
|
328
|
igor@38
|
def load_notes(files):
    """Load per-word notes from note files.

    Each line has the form "<word> <note text>".  Returns a dict mapping
    word -> {filename -> note}, so the same word may carry a different
    note in each file.

    Lines without a note part (e.g. blank lines) are skipped; the
    original raised ValueError on them while unpacking the split.
    """
    notes = {}
    for filename in files:
        with codecs.open(filename, "r", "utf-8") as f:
            for line in f.readlines():
                fields = re.split(r'\s+', line.rstrip('\n'), maxsplit=1)
                if len(fields) < 2:
                    # Blank or note-less line: nothing to record.
                    continue
                (word, note) = fields
                notes.setdefault(word, {})
                notes[word][filename] = note
    return notes
|
igor@38
|
338
|
igor@39
|
def add_notes(lines, notes):
    """Append each word's note (from the primary notes file) to its line.

    Comment lines (starting with '#'), lines whose second token has no
    note, and unparseable lines all pass through unchanged.
    """
    notes_filename = notes_filenames()[0]
    result = []
    for line in lines:
        annotated = line
        if not line.startswith('#'):
            # The word is the second whitespace-separated token on the line.
            match_object = re.search('^\s*\S+\s*(\S+)', line)
            if match_object:
                word = match_object.group(1)
                note = notes.get(word, {}).get(notes_filename)
                if note is not None:
                    annotated = "%-30s %s\n" % (line.rstrip('\n'), note)
        result.append(annotated)
    return result
|
igor@39
|
359
|
igor@39
|
360 def remove_notes(lines, notes_group):
|
igor@39
|
361 notes_filename = notes_filenames()[0]
|
igor@39
|
362 notes = {}
|
igor@39
|
363 for k in notes_group.keys():
|
igor@39
|
364 |