Preprocessing.py
# Imports
import os
import csv
import string
import textmining
import numpy as np
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import tokenize
from nltk.corpus import stopwords
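
# Note (assumption about the runtime environment): word_tokenize/sent_tokenize
# rely on NLTK's 'punkt' tokenizer data and stopwords.words('english') relies on
# the 'stopwords' corpus. If these are not already installed, a one-off
# nltk.download('punkt') and nltk.download('stopwords') should fetch them.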

# Remove punctuation by mapping every punctuation character to None
def strip_punctuation(s):
    table = str.maketrans({key: None for key in string.punctuation})
    return s.translate(table)

def preprocess(inputFile, f_name):
    # Read the text file and normalise abbreviations that would otherwise
    # confuse the sentence tokenizer
    file = open(inputFile, 'r')
    text = file.read()
    file.close()
    text = text.replace('\n', ' ')
    text = text.replace("U.S.", "US")
    text = text.replace("U.N.", "UN")
    text = text.replace("Gov.", "government")
    # Number of words in the text (punctuation stripped)
    words_count = len(word_tokenize(strip_punctuation(text)))
    # Split into sentences and store them in a list
    sentences = tokenize.sent_tokenize(text)
    # Keep a copy of the original sentences
    sentences_backup = list(sentences)
    # Write the numbered sentences to a file
    fileObj1 = open(".\\Pre_Processed\\" + f_name.replace('.txt', '') + "_SS.txt", 'w')
    for i in range(len(sentences_backup)):
        fileObj1.write(str(i + 1) + " " + sentences_backup[i])
        fileObj1.write('\n')
    fileObj1.close()
    # Apply stop-word removal and lower-casing to each sentence
    filtered_sentences = []
    stop_words = set(stopwords.words('english'))
    for i in range(len(sentences_backup)):
        temp = []
        word_tokens = word_tokenize(strip_punctuation(sentences_backup[i]))
        for w in word_tokens:
            if w.lower() not in stop_words:
                temp.append(w.lower())
        filtered_sentences.append(temp)
    # Build a term-document matrix with one document per filtered sentence
    tdm = textmining.TermDocumentMatrix()
    for i in range(len(sentences)):
        sent = " ".join(filtered_sentences[i])
        tdm.add_doc(sent)
    temp = list(tdm.rows(cutoff=1))
    vocab = tuple(temp[0])                    # first row holds the vocabulary
    X = np.array(temp[1:], dtype='float64')
    X1 = X.transpose()                        # rows = terms, columns = sentences
    # Save the term-by-sentence matrix as CSV
    fileObj2 = ".\\Pre_Processed\\" + f_name.replace('.txt', '') + ".csv"
    np.savetxt(fileObj2, X1, fmt='%1.5f', delimiter=",")
    # Save the vocabulary, one term per row
    vocab1 = tuple(zip(vocab))
    fileObj3 = ".\\Pre_Processed\\" + f_name.replace('.txt', '') + "_terms.csv"
    np.savetxt(fileObj3, vocab1, fmt='%s', delimiter=",")
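
# Assumed directory layout, inferred from the paths used above: the working
# directory contains a 'Documents' folder with the input .txt files and an
# existing 'Pre_Processed' folder into which the numbered-sentence file, the
# term-document matrix CSV and the terms CSV are written.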
os.chdir(".\\path to working directory")
for f in os.listdir(".\\Documents"):
    inputFile = ".\\Documents\\" + f
    preprocess(inputFile, f)