search_best_params.py
#!/usr/bin/env python3
__author__ = 'morban'
__email__ = 'mathieu.orban@openedition.org'
from multiprocessing import cpu_count
import numpy as np
from pprint import pprint
from time import time
import logging
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
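# Note: sklearn.grid_search is the legacy module path; recent scikit-learn
# releases expose GridSearchCV from sklearn.model_selection instead.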
from sklearn.pipeline import Pipeline
import argparse
parser = argparse.ArgumentParser(description='Search the best parameters for training. Give the binary classifier one directory containing two subdirectories, one per category.')
parser.add_argument('-d', '--data_train', metavar='DATATRAIN', type=str, help='Path to a directory containing two subdirectories, one per category.')
args = parser.parse_args()
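# Example invocation (assuming a training directory with one subdirectory per category):
#   python search_best_params.py -d /path/to/data_train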
list_stop_words = []
with open('stop_list_fr.txt', "r") as f:
    list_stop_words = [st.rstrip() for st in f.readlines()]
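# stop_list_fr.txt is expected to hold one French stop word per line.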
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
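# Pipeline: raw text -> token counts (CountVectorizer) -> TF-IDF weighting -> linear classifier trained with SGD.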
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])
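# Hyperparameter grid: GridSearchCV will cross-validate every combination of these values.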
parameters = {
    'vect__analyzer': ('word',),
    'vect__stop_words': (list_stop_words, 'english', None),
    'vect__max_df': (0.1, 0.3, 0.5),
    'vect__max_features': (3000, 5000, 7000),
    'vect__ngram_range': ((1, 1), (1, 2), (1, 3)),  # unigrams, bigrams or trigrams
    'tfidf__use_idf': (True, False),
    'tfidf__norm': ('l1', 'l2'),
    'clf__loss': ('hinge', 'log', 'modified_huber'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    'clf__n_iter': (10, 50, 80),
}
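# Entry point: run the exhaustive grid search and report the best-scoring parameter combination.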
if __name__ == "__main__":
    # Get the training data
    categories = ['CR', 'NCR']
    dataset = load_files(args.data_train, categories=categories)
    print("%d documents" % len(dataset.data))
    print("%d categories" % len(dataset.target_names))
    jobs = int(cpu_count() * 1.5)
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=jobs, verbose=1)
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    X, Y = dataset.data, dataset.target
    t0 = time()
    grid_search.fit(X, Y)
    print("done in %0.3fs" % (time() - t0))
    print()
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))