StackGenVis: Alignment of Data, Algorithms, and Models for Stacking Ensemble Learning Using Performance Metrics https://doi.org/10.1109/TVCG.2020.3030352
StackGenVis/run.py

from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
import requests  # used by catch_all() below to mirror the client during debugging
from numpy import array
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast

from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale

import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA

from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector

from skdist.distribute.search import DistGridSearchCV
from pyspark.sql import SparkSession

# This block of code handles the connection between the server, the database, and the client (plus routing).

# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})

# Retrieve data from client
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
    global DataRawLength
    global DataResultsRaw
    global RANDOM_SEED
    RANDOM_SEED = 42

    global XData
    XData = []
    global yData
    yData = []

    global detailsParams
    detailsParams = []
    global algorithmList
    algorithmList = []
    global ClassifierIDsList
    ClassifierIDsList = ''

    # Initializing models
    global resultsList
    resultsList = []
    global RetrieveModelsList
    RetrieveModelsList = []
    global allParametersPerformancePerModel
    allParametersPerformancePerModel = []
    global all_classifiers
    all_classifiers = []
    global crossValidation
    crossValidation = 3

    global scoring
    #scoring = {'accuracy': 'accuracy', 'f1_macro': 'f1_weighted', 'precision': 'precision_weighted', 'recall': 'recall_weighted', 'jaccard': 'jaccard_weighted', 'neg_log_loss': 'neg_log_loss', 'r2': 'r2', 'neg_mean_absolute_error': 'neg_mean_absolute_error'}
    scoring = {'accuracy': 'accuracy', 'f1_macro': 'f1_weighted', 'precision': 'precision_weighted', 'recall': 'recall_weighted', 'jaccard': 'jaccard_weighted'}

    global loopFeatures
    loopFeatures = 2
    global columns
    columns = []
    global results
    results = []
    global target_names
    target_names = []

    return 'The reset was done!'

# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
    fileName = request.get_data().decode('utf8').replace("'", '"')
    #global featureSelection
    #featureSelection = request.get_data().decode('utf8').replace("'", '"')
    #featureSelection = json.loads(featureSelection)

    global DataRawLength
    global DataResultsRaw

    global RANDOM_SEED
    RANDOM_SEED = 42

    global XData
    XData = []
    global yData
    yData = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global algorithmList
    algorithmList = []

    global detailsParams
    detailsParams = []

    # Initializing models
    global RetrieveModelsList
    RetrieveModelsList = []

    global resultsList
    resultsList = []
    global allParametersPerformancePerModel
    allParametersPerformancePerModel = []
    global all_classifiers
    all_classifiers = []
    global crossValidation
    crossValidation = 3

    global scoring
    scoring = {'accuracy': 'accuracy', 'f1_macro': 'f1_weighted', 'precision': 'precision_weighted', 'recall': 'recall_weighted', 'jaccard': 'jaccard_weighted'}
    #scoring = {'accuracy': 'accuracy', 'f1_macro': 'f1_weighted', 'precision': 'precision_weighted', 'recall': 'recall_weighted', 'jaccard': 'jaccard_weighted', 'neg_log_loss': 'neg_log_loss', 'r2': 'r2', 'neg_mean_absolute_error': 'neg_mean_absolute_error'}

    global NumberofscoringMetrics
    NumberofscoringMetrics = len(scoring)

    global loopFeatures
    loopFeatures = 2

    global factors
    factors = [1, 1, 1, 1, 1]
    global columns
    columns = []

    global results
    results = []
    global target_names
    target_names = []

    DataRawLength = -1
    data = json.loads(fileName)
    if data['fileName'] == 'BreastC':
        CollectionDB = mongo.db.BreastC.find()
    elif data['fileName'] == 'DiabetesC':
        CollectionDB = mongo.db.DiabetesC.find()
    else:
        CollectionDB = mongo.db.IrisC.find()

    DataResultsRaw = []
    for index, item in enumerate(CollectionDB):
        item['_id'] = str(item['_id'])
        item['InstanceID'] = index
        DataResultsRaw.append(item)
    DataRawLength = len(DataResultsRaw)

    DataSetSelection()
    return 'Everything is okay'

# Send data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
    json.dumps(DataResultsRaw)
    response = {
        'Collection': DataResultsRaw
    }
    return jsonify(response)

def DataSetSelection():
    DataResults = copy.deepcopy(DataResultsRaw)

    # the target column is the one whose name is marked with a '*'
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                target = key
                continue
        continue

    DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
    DataResults.sort(key=lambda x: x[target], reverse=True)

    for dictionary in DataResults:
        del dictionary['_id']
        del dictionary['InstanceID']
        del dictionary[target]

    AllTargets = [o[target] for o in DataResultsRaw]
    AllTargetsFloatValues = []

    previous = None
    Class = 0
    for i, value in enumerate(AllTargets):
        if (i == 0):
            previous = value
            target_names.append(value)
        if (value == previous):
            AllTargetsFloatValues.append(Class)
        else:
            Class = Class + 1
            target_names.append(value)
            AllTargetsFloatValues.append(Class)
            previous = value

    ArrayDataResults = pd.DataFrame.from_dict(DataResults)

    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargetsFloatValues

    warnings.simplefilter('ignore')
    return 'Everything is okay'

# Main function
if __name__ == '__main__':
    app.run()

# Debugging and mirroring the client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    if app.debug:
        return requests.get('http://localhost:8080/{}'.format(path)).text
    return render_template("index.html")

# This block of code is for server computations

def column_index(df, query_cols):
    cols = df.columns.values
    sidx = np.argsort(cols)
    return sidx[np.searchsorted(cols, query_cols, sorter=sidx)].tolist()
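
# Usage sketch (illustrative only; the column names here are hypothetical, not from the data sets above):
#   df = pd.DataFrame(columns=['age', 'bmi', 'glucose'])
#   column_index(df, ['glucose', 'age'])  # -> [2, 0], the positional indices of the queried columns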

def class_feature_importance(X, Y, feature_importances):
    N, M = X.shape
    X = scale(X)

    out = {}
    for c in set(Y):
        out[c] = dict(
            zip(range(N), np.mean(X[Y == c, :], axis=0) * feature_importances)
        )

    return out
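
# Usage sketch (illustrative only, not from the original file): for X of shape
# (n_samples, n_features), labels Y, and a feature_importances vector of length n_features,
# the function above returns a dict mapping each class label to a dict keyed by feature
# position, whose values are the class-wise mean of the scaled feature times its global
# importance, i.e. a per-class view of feature relevance. A hypothetical call, reusing the
# permutation importances computed later in this file:
#   class_feature_importance(XData.values, np.array(yData), perm.feature_importances_)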

# Initialize every model for each algorithm
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
    # get the models from the frontend
    RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
    RetrievedModel = json.loads(RetrievedModel)

    algorithms = RetrievedModel['Algorithms']

    # loop through the algorithms
    global allParametersPerformancePerModel
    for eachAlgor in algorithms:
        if (eachAlgor == 'KNN'):
            clf = KNeighborsClassifier()
            params = {'n_neighbors': list(range(1, 25)), 'weights': ['uniform', 'distance'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski']}
            AlgorithmsIDsEnd = 0
        else:
            clf = RandomForestClassifier()
            params = {'n_estimators': list(range(80, 120)), 'criterion': ['gini', 'entropy']}
            AlgorithmsIDsEnd = 576
        allParametersPerformancePerModel = GridSearchForModels(clf, params, eachAlgor, factors, AlgorithmsIDsEnd)

    # call the function that sends the results to the frontend
    SendEachClassifiersPerformanceToVisualize()
    return 'Everything Okay'

location = './cachedir'
memory = Memory(location, verbose=0)

# calculate the performance and other results for every model of each algorithm
@memory.cache
def GridSearchForModels(clf, params, eachAlgor, factors, AlgorithmsIDsEnd):
    # instantiate spark session
    spark = (
        SparkSession
        .builder
        .getOrCreate()
    )
    sc = spark.sparkContext

    # this is the grid we use to train the models
    grid = DistGridSearchCV(
        estimator=clf, param_grid=params,
        sc=sc, cv=crossValidation, refit='accuracy', scoring=scoring,
        verbose=0, n_jobs=-1)
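
    # Illustrative note (assumption, not from the original run.py): DistGridSearchCV comes
    # from sk-dist and is intended as a Spark-distributed stand-in for sklearn's GridSearchCV,
    # fanning the cross-validation fits out over the Spark context `sc`; the fitted object
    # exposes a GridSearchCV-style `cv_results_` dict, which is what the code below unpacks
    # per candidate model.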

    # fit and extract the probabilities
    grid.fit(XData, yData)

    # process the results
    cv_results = []
    cv_results.append(grid.cv_results_)
    df_cv_results = pd.DataFrame.from_dict(cv_results)

    # number of models stored
    number_of_models = len(df_cv_results.iloc[0][0])

    # initialize results per row
    df_cv_results_per_row = []

    # loop through the number of models
    modelsIDs = []
    for i in range(number_of_models):
        modelsIDs.append(AlgorithmsIDsEnd + i)
        # initialize results per item
        df_cv_results_per_item = []
        for column in df_cv_results.iloc[0]:
            df_cv_results_per_item.append(column[i])
        df_cv_results_per_row.append(df_cv_results_per_item)

    # store the results into a pandas dataframe
    df_cv_results_classifiers = pd.DataFrame(data=df_cv_results_per_row, columns=df_cv_results.columns)

    # copy and filter in order to keep only the metrics
    metrics = df_cv_results_classifiers.copy()
    metrics = metrics.filter(['mean_test_accuracy', 'mean_test_f1_macro', 'mean_test_precision', 'mean_test_recall', 'mean_test_jaccard'])

    # weight each model's metrics by the user-controlled factors
    sumperModel = []
    for index, row in metrics.iterrows():
        rowSum = 0
        lengthFactors = NumberofscoringMetrics
        for loop, elements in enumerate(row):
            lengthFactors = lengthFactors - 1 + factors[loop]
            rowSum = elements * factors[loop] + rowSum
        if lengthFactors == 0:
            sumperModel = 0
        else:
            sumperModel.append(rowSum / lengthFactors)
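
    # Illustrative note (not from the original file): the loop above computes, per model, a
    # weighted average of its metric vector m under the user-chosen factors f, i.e.
    #   score = sum_i(m_i * f_i) / sum_i(f_i).
    # For example, with binary factors f = [1, 1, 0, 0, 0] and metrics
    # m = [0.90, 0.80, 0.70, 0.60, 0.50], the stored score is (0.90 + 0.80) / 2 = 0.85.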

    # summarize the metrics of all models
    summarizedMetrics = pd.DataFrame(sumperModel)
    summarizedMetrics.rename(columns={0: 'sum'}, inplace=True)

    # concatenate parameters and performance
    parameters = pd.DataFrame(df_cv_results_classifiers['params'])
    parametersPerformancePerModel = pd.concat([summarizedMetrics, parameters], axis=1)
    parametersPerformancePerModel = parametersPerformancePerModel.to_json()

    parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
    Models = []
    for index, items in enumerate(parametersLocal):
        Models.append(str(index))
    parametersLocalNew = [parametersLocal[your_key] for your_key in Models]

    permList = []
    PerFeatureAccuracy = []
    PerClassMetric = []
    perModelProb = []

    for eachModelParameters in parametersLocalNew:
        clf.set_params(**eachModelParameters)

        # permutation feature importance for this model
        perm = PermutationImportance(clf, cv=None, refit=True, n_iter=25).fit(XData, yData)
        permList.append(perm.feature_importances_)

        # per-feature accuracy: cross-validate the model on each feature in isolation
        n_feats = XData.shape[1]
        for i in range(n_feats):
            scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=crossValidation)
            PerFeatureAccuracy.append(scores.mean())

        clf.fit(XData, yData)
        yPredict = clf.predict(XData)

        # per-class metrics (classification report with the class names)
        PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))

        # per-model prediction probabilities
        yPredictProb = clf.predict_proba(XData)
        perModelProb.append(yPredictProb.tolist())

    perModelProbPandas = pd.DataFrame(perModelProb)
    perModelProbPandas = perModelProbPandas.to_json()

    PerClassMetricPandas = pd.DataFrame(PerClassMetric)
    del PerClassMetricPandas['accuracy']
    del PerClassMetricPandas['macro avg']
    del PerClassMetricPandas['weighted avg']
    PerClassMetricPandas = PerClassMetricPandas.to_json()

    perm_imp_eli5PD = pd.DataFrame(permList)
    perm_imp_eli5PD = perm_imp_eli5PD.to_json()

    PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracy)
    PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()

    bestfeatures = SelectKBest(score_func=chi2, k='all')
    fit = bestfeatures.fit(XData, yData)
    dfscores = pd.DataFrame(fit.scores_)
    dfcolumns = pd.DataFrame(XData.columns)
    featureScores = pd.concat([dfcolumns, dfscores], axis=1)
    featureScores.columns = ['Specs', 'Score']  # naming the dataframe columns
    featureScores = featureScores.to_json()

    # gather the results and send them back
    results.append(modelsIDs)  # Position: 0 and so on
    results.append(parametersPerformancePerModel)  # Position: 1 and so on
    results.append(PerClassMetricPandas)  # Position: 2 and so on
    results.append(PerFeatureAccuracyPandas)  # Position: 3 and so on
    results.append(perm_imp_eli5PD)  # Position: 4 and so on
    results.append(featureScores)  # Position: 5 and so on

    metrics = metrics.to_json()
    results.append(metrics)  # Position: 6 and so on
    results.append(perModelProbPandas)  # Position: 7 and so on

    return results

# Sending each model's results to the frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
    response = {
        'PerformancePerModel': allParametersPerformancePerModel,
    }
    return jsonify(response)

#def Remove(duplicate):
#    final_list = []
#    for num in duplicate:
#        if num not in final_list:
#            if (isinstance(num, float)):
#                if np.isnan(num):
#                    pass
#                else:
#                    final_list.append(int(num))
#            else:
#                final_list.append(num)
#    return final_list

# Retrieve data from client
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
    RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
    RetrieveModelsPar = json.loads(RetrieveModelsPar)

    counter1 = 0
    counter2 = 0

    global KNNModels
    KNNModels = []
    global RFModels
    RFModels = []

    global algorithmsList
    algorithmsList = RetrieveModelsPar['algorithms']
    for index, items in enumerate(algorithmsList):
        if (items == 'KNN'):
            counter1 = counter1 + 1
            KNNModels.append(int(RetrieveModelsPar['models'][index]))
        else:
            counter2 = counter2 + 1
            # RF model IDs start at 576 (see AlgorithmsIDsEnd above), so shift them back to local indices
            RFModels.append(int(RetrieveModelsPar['models'][index]) - 576)

    return 'Everything Okay'

# Retrieve data from client
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
    Factors = request.get_data().decode('utf8').replace("'", '"')
    FactorsInt = json.loads(Factors)

    global sumPerClassifierSel
    global ModelSpaceMDSNew
    global ModelSpaceTSNENew

    sumPerClassifierSel = []
    sumPerClassifierSel = sumPerMetric(FactorsInt['Factors'])

    ModelSpaceMDSNew = []
    ModelSpaceTSNENew = []

    preProcessResults = []
    preProcessResults = Preprocessing()
    XClassifiers = preProcessResults[3]

    flagLocal = 0
    countRemovals = 0
    for l, el in enumerate(FactorsInt['Factors']):
        if el == 0:
            XClassifiers.drop(XClassifiers.columns[[l - countRemovals]], axis=1, inplace=True)
            countRemovals = countRemovals + 1
            flagLocal = 1
    if flagLocal == 1:
        ModelSpaceMDSNew = FunMDS(XClassifiers)
        ModelSpaceTSNENew = FunTsne(XClassifiers)
        ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()

    return 'Everything Okay'

@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
    global sumPerClassifierSel
    global ModelSpaceMDSNew
    global ModelSpaceTSNENew
    global metricsPerModel

    ResultsUpdateOverview = []
    ResultsUpdateOverview.append(sumPerClassifierSel)
    ResultsUpdateOverview.append(ModelSpaceMDSNew)
    ResultsUpdateOverview.append(ModelSpaceTSNENew)
    ResultsUpdateOverview.append(metricsPerModel)

    response = {
        'Results': ResultsUpdateOverview
    }
    return jsonify(response)

def PreprocessingMetrics():
    dicKNN = json.loads(allParametersPerformancePerModel[6])
    dicRF = json.loads(allParametersPerformancePerModel[14])

    dfKNN = pd.DataFrame.from_dict(dicKNN)
    dfKNNFiltered = dfKNN.iloc[KNNModels, :]
    dfRF = pd.DataFrame.from_dict(dicRF)
    dfRFFiltered = dfRF.iloc[RFModels, :]

    df_concatMetrics = pd.concat([dfKNNFiltered, dfRFFiltered])
    return df_concatMetrics

def PreprocessingPred():
    dicKNN = json.loads(allParametersPerformancePerModel[7])
    dicRF = json.loads(allParametersPerformancePerModel[15])

    dfKNN = pd.DataFrame.from_dict(dicKNN)
    dfKNNFiltered = dfKNN.iloc[KNNModels, :]
    dfRF = pd.DataFrame.from_dict(dicRF)
    dfRFFiltered = dfRF.iloc[RFModels, :]

    df_concatProbs = pd.concat([dfKNNFiltered, dfRFFiltered])

    predictions = []
    for column, content in df_concatProbs.items():
        # average the class probabilities of the selected models for each instance
        el = [sum(x) / len(x) for x in zip(*content)]
        predictions.append(el)

    return predictions
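
# Illustrative note (numbers are made up, not from the original file): if two selected models
# assign probabilities [0.8, 0.2] and [0.6, 0.4] to the same instance, PreprocessingPred()
# yields [0.7, 0.3] for that instance; these averaged probability vectors are what FunTsne()
# projects into the prediction space below.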

def FunMDS(data):
    mds = MDS(n_components=2, random_state=RANDOM_SEED)
    XTransformed = mds.fit_transform(data).T
    XTransformed = XTransformed.tolist()
    return XTransformed

def FunTsne(data):
    tsne = TSNE(n_components=2).fit_transform(data)
    tsne.shape
    return tsne

def InitializeEnsemble():
    XModels = PreprocessingMetrics()

    DataSpace = FunTsne(XData)
    DataSpaceList = DataSpace.tolist()

    ModelSpaceMDS = FunMDS(XModels)
    ModelSpaceTSNE = FunTsne(XModels)
    ModelSpaceTSNE = ModelSpaceTSNE.tolist()

    PredictionProbSel = PreprocessingPred()
    PredictionSpace = FunTsne(PredictionProbSel)
    PredictionSpaceList = PredictionSpace.tolist()

    key = 0
    global scores
    scores = EnsembleModel(key)

    ReturnResults(ModelSpaceMDS, ModelSpaceTSNE, DataSpaceList, PredictionSpaceList)

def ReturnResults(ModelSpaceMDS, ModelSpaceTSNE, DataSpaceList, PredictionSpaceList):
    global Results
    Results = []

    XDataJSON = XData.columns.tolist()

    Results.append(json.dumps(ModelSpaceMDS))  # Position: 0
    Results.append(json.dumps(target_names))  # Position: 1
    Results.append(json.dumps(XDataJSON))  # Position: 2
    Results.append(json.dumps(DataSpaceList))  # Position: 3
    Results.append(json.dumps(PredictionSpaceList))  # Position: 4
    Results.append(json.dumps(ModelSpaceTSNE))  # Position: 5

    return Results

# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
    while (len(DataResultsRaw) != DataRawLength):
        pass
    InitializeEnsemble()
    response = {
        'OverviewResults': Results
    }
    return jsonify(response)

# Retrieve data from client
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
    global ClassifierIDsList
    ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
    key = 1
    # EnsembleModel reads the brushed classifier IDs from the global ClassifierIDsList set above
    EnsembleModel(key)
    return 'Everything Okay'

# Retrieve data from client
@cross_origin(origin='localhost', headers=['Content-Type', 'Authorization'])
@app.route('/data/FeaturesSelection', methods=["GET", "POST"])
def FeatureSelPerModel():
    global featureSelection
    global ClassifierIDsList
    featureSelection = request.get_data().decode('utf8').replace("'", '"')
    featureSelection = json.loads(featureSelection)

    global detailsParams
    global algorithmList
    results = []

    global resultsList
    resultsList = []

    global loopFeatures
    loopFeatures = 2

    algorithmsWithoutDuplicates = list(dict.fromkeys(algorithmList))
    for index, eachalgor in enumerate(algorithmsWithoutDuplicates):
        if (eachalgor == 'KNN'):
            clf = KNeighborsClassifier()
            params = detailsParams[index]
            results.append(GridSearch(clf, params))
            resultsList.append(results[0])
        else:
            clf = RandomForestClassifier()
            params = detailsParams[index]
            results.append(GridSearch(clf, params))
            resultsList.append(results[0])

    if (featureSelection['featureSelection'] == ''):
        key = 0
    else:
        key = 2
    return 'Everything Okay'

location = './cachedir'
memory = Memory(location, verbose=0)

@memory.cache
def EnsembleModel(keyRetrieved):
    scoresLocal = []
    all_classifiersSelection = []

    # all_classifiers is shared module state: it is filled when the whole selection is stacked
    # (keyRetrieved == 0) and reused when a brushed subset (keyRetrieved == 1) arrives
    global all_classifiers

    if (keyRetrieved == 0):
        columnsInit = []
        all_classifiers = []
        columnsInit = [XData.columns.get_loc(c) for c in XData.columns if c in XData]

        temp = json.loads(allParametersPerformancePerModel[1])
        dfParamKNN = pd.DataFrame.from_dict(temp)
        dfParamKNNFilt = dfParamKNN.iloc[:, 1]
        for eachelem in KNNModels:
            arg = dfParamKNNFilt[eachelem]
            all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), KNeighborsClassifier().set_params(**arg)))

        temp = json.loads(allParametersPerformancePerModel[9])
        dfParamRF = pd.DataFrame.from_dict(temp)
        dfParamRFFilt = dfParamRF.iloc[:, 1]
        for eachelem in RFModels:
            arg = dfParamRFFilt[eachelem]
            all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), RandomForestClassifier().set_params(**arg)))

        lr = LogisticRegression()
        sclf = StackingCVClassifier(classifiers=all_classifiers,
                                    use_probas=True,
                                    meta_classifier=lr,
                                    random_state=RANDOM_SEED,
                                    n_jobs=-1)
    elif (keyRetrieved == 1):
        # the brushed classifier IDs arrive through the global ClassifierIDsList
        # (set in RetrieveSelClassifiersID); parse them into a local variable
        selectedClassifiers = json.loads(ClassifierIDsList)
        for loop in selectedClassifiers['ClassifiersList']:
            temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
            all_classifiersSelection.append(all_classifiers[temp[0]])

        lr = LogisticRegression()
        sclf = StackingCVClassifier(classifiers=all_classifiersSelection,
                                    use_probas=True,
                                    meta_classifier=lr,
                                    random_state=RANDOM_SEED,
                                    n_jobs=-1)
    else:
        columnsReduce = columns.copy()
        lr = LogisticRegression()
        if (len(all_classifiersSelection) == 0):
            all_classifiers = []
            for index, eachelem in enumerate(algorithmsWithoutDuplicates):
                if (eachelem == 'KNN'):
                    for j, each in enumerate(resultsList[index][1]):
                        all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsReduce[j]), KNeighborsClassifier().set_params(**each)))
                    del columnsReduce[0:len(resultsList[index][1])]
                else:
                    for j, each in enumerate(resultsList[index][1]):
                        all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsReduce[j]), RandomForestClassifier().set_params(**each)))
                    del columnsReduce[0:len(resultsList[index][1])]
            sclf = StackingCVClassifier(classifiers=all_classifiers,
                                        use_probas=True,
                                        meta_classifier=lr,
                                        random_state=RANDOM_SEED,
                                        n_jobs=-1)
        else:
            for index, eachelem in enumerate(algorithmsWithoutDuplicates):
                if (eachelem == 'KNN'):
                    for j, each in enumerate(resultsList[index][1]):
                        all_classifiersSelection.append(make_pipeline(ColumnSelector(cols=columnsReduce[j]), KNeighborsClassifier().set_params(**each)))
                    del columnsReduce[0:len(resultsList[index][1])]
                else:
                    for j, each in enumerate(resultsList[index][1]):
                        all_classifiersSelection.append(make_pipeline(ColumnSelector(cols=columnsReduce[j]), RandomForestClassifier().set_params(**each)))
                    del columnsReduce[0:len(resultsList[index][1])]
            sclf = StackingCVClassifier(classifiers=all_classifiersSelection,
                                        use_probas=True,
                                        meta_classifier=lr,
                                        random_state=RANDOM_SEED,
                                        n_jobs=-1)

    for clf, label in zip([sclf], ['StackingClassifier']):
        scoresLocal = model_selection.cross_val_score(clf, XData, yData, cv=crossValidation, scoring='accuracy')

    return scoresLocal
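
# Illustrative sketch (not part of the original file): how the StackingCVClassifier used above
# combines the selected models. Each base classifier is fitted with cross-validation, its
# out-of-fold class probabilities (use_probas=True) become the meta-features, and the
# LogisticRegression meta-classifier is trained on them. A minimal standalone example,
# assuming only scikit-learn and mlxtend are installed:
#
#   from mlxtend.classifier import StackingCVClassifier
#   from sklearn.neighbors import KNeighborsClassifier
#   from sklearn.ensemble import RandomForestClassifier
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.datasets import load_iris
#
#   X, y = load_iris(return_X_y=True)
#   stack = StackingCVClassifier(
#       classifiers=[KNeighborsClassifier(), RandomForestClassifier()],
#       meta_classifier=LogisticRegression(),
#       use_probas=True, cv=3, random_state=42)
#   stack.fit(X, y)
#   print(stack.predict(X[:5]))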

# Sending the final results to be visualized as a line plot
@app.route('/data/SendFinalResultsBacktoVisualize', methods=["GET", "POST"])
def SendToPlotFinalResults():
    FinalResults = []
    FinalResults.append(scores.mean())
    FinalResults.append(scores.std())
    response = {
        'FinalResults': FinalResults
    }
    return jsonify(response)