import csv
import os
import nltk
from nltk.collocations import *
from parse_debates import DebateParser
from nltk.tokenize import RegexpTokenizer

'''
This module calculates n-grams that appear to be significant in the context of
the text, based on likelihood-ratio (G-test) scores.
'''


def removeNonAscii(s):
    """Strip non-ASCII characters from a string."""
    return "".join(filter(lambda x: ord(x) < 128, s))


def get_bigram_likelihood(statements, freq_filter=3, nbest=200):
    """
    Return the top-n bi-grams from a group of documents, scored by likelihood ratio.

    :param statements: list of strings
    :param freq_filter: minimum number of occurrences for a bi-gram to be kept
    :param nbest: number of highest-scoring bi-grams to return
    """
    words = []
    print 'Generating word list...'
    # tokenize each statement into words, dropping non-word characters
    tokenizer = RegexpTokenizer(r'\w+')
    for statement in statements:
        words.extend(tokenizer.tokenize(statement))

    bigram_measures = nltk.collocations.BigramAssocMeasures()
    bigram_finder = BigramCollocationFinder.from_words(words)
    # keep only bi-grams that appear at least freq_filter times
    bigram_finder.apply_freq_filter(freq_filter)
    # TODO: use custom stop words
    # drop bi-grams containing very short words or English stop words
    bigram_finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in nltk.corpus.stopwords.words('english'))
    # score_ngrams returns (bigram, score) pairs sorted by descending likelihood ratio
    scored = bigram_finder.score_ngrams(bigram_measures.likelihood_ratio)
    return scored[:nbest]
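
# A minimal usage sketch (hypothetical input; assumes the NLTK 'stopwords'
# corpus has been downloaded, e.g. via nltk.download('stopwords')):
#
#   statements = ["climate change is real", "we must act on climate change"]
#   for (w1, w2), score in get_bigram_likelihood(statements, freq_filter=2):
#       print w1, w2, score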


def save_bigram_likelihood_tsv(statements, path):
    """
    Save likely bi-grams as a tab-separated file.

    :param statements: list of strings
    :param path: output path for the saved tsv
    """
    with open(path, "wb+") as output_file:
        writer = csv.writer(output_file, delimiter="\t")
        if len(statements) > 0:
            statements = [removeNonAscii(statement) for statement in statements]
            ngrams = get_bigram_likelihood(statements)
            if ngrams:
                for ngram in ngrams:
                    # one row per bi-gram: "word1_word2<TAB>score"
                    writer.writerow([ngram[0][0] + '_' + ngram[0][1], ngram[1]])
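
# Example rows of the resulting TSV (bi-grams and scores are hypothetical,
# shown only to illustrate the format written above):
#
#   climate_change    512.3
#   health_care       401.8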


def save_bigrams_for_replacement_file_txt(statements, path):
    """
    Save likely bi-grams to a plain-text replacement file, one bi-gram per line.

    :param statements: list of strings
    :param path: output path for the saved txt
    """
    with open(path, "wb+") as output_file:
        if len(statements) > 0:
            statements = [removeNonAscii(statement) for statement in statements]
            ngrams = get_bigram_likelihood(statements)
            if ngrams:
                for ngram in ngrams:
                    # each line is the two bi-gram tokens separated by a space
                    output_file.write(ngram[0][0] + ' ' + ngram[0][1] + '\n')


if __name__ == "__main__":
    parser = DebateParser("./data/debates")
    parser.parse()
    # flatten the parsed statements into a single list of statement strings
    all_statements = [item[0] for sublist in parser.statements.values() for item in sublist]
    save_bigram_likelihood_tsv(all_statements, os.path.join("data", "ngrams.tsv"))
    save_bigrams_for_replacement_file_txt(all_statements,
                                          os.path.join("./data/mallet_files", "replacements.txt"))