parent 68ff2d42b0
commit 9a93d84855
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
{"duration": 7.470085859298706, "input_args": {"Data": " F1 F2 F3 F4\n0 2.5 3.3 6.3 6.0\n1 1.9 2.7 5.8 5.1\n2 2.1 3.0 7.1 5.9\n3 1.8 2.9 6.3 5.6\n4 2.2 3.0 6.5 5.8\n.. ... ... ... ...\n145 0.3 3.0 4.8 1.4\n146 0.2 3.8 5.1 1.6\n147 0.2 3.2 4.6 1.4\n148 0.2 3.7 5.3 1.5\n149 0.2 3.3 5.0 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=9.044474534604408, gamma=0.1, probability=True, random_state=42)"}}
Binary file not shown.
@@ -0,0 +1 @@
{"duration": 2.0144307613372803, "input_args": {"Data": " F1 F2_mms F3 F4 F1xF3\n0 2.5 0.541667 6.3 6.0 15.75\n1 1.9 0.291667 5.8 5.1 11.02\n2 2.1 0.416667 7.1 5.9 14.91\n3 1.8 0.375000 6.3 5.6 11.34\n4 2.2 0.416667 6.5 5.8 14.30\n.. ... ... ... ... ...\n145 0.3 0.416667 4.8 1.4 1.44\n146 0.2 0.750000 5.1 1.6 1.02\n147 0.2 0.500000 4.6 1.4 0.92\n148 0.2 0.708333 5.3 1.5 1.06\n149 0.2 0.541667 5.0 1.4 1.00\n\n[150 rows x 5 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
{"duration": 2.988314151763916, "input_args": {"Data": " F1 F2 F3_l10 F4\n0 2.5 3.3 0.298389 6.0\n1 1.9 2.7 0.282605 5.1\n2 2.1 3.0 0.320533 5.9\n3 1.8 2.9 0.298389 5.6\n4 2.2 3.0 0.304254 5.8\n.. ... ... ... ...\n145 0.3 3.0 0.244984 1.4\n146 0.2 3.8 0.257268 1.6\n147 0.2 3.2 0.236226 1.4\n148 0.2 3.7 0.264948 1.5\n149 0.2 3.3 0.253280 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
@@ -0,0 +1 @@
{"duration": 7.335001707077026, "input_args": {"Data": " F1 F2 F3 F4\n0 2.5 3.3 6.3 6.0\n1 1.9 2.7 5.8 5.1\n2 2.1 3.0 7.1 5.9\n3 1.8 2.9 6.3 5.6\n4 2.2 3.0 6.5 5.8\n.. ... ... ... ...\n145 0.3 3.0 4.8 1.4\n146 0.2 3.8 5.1 1.6\n147 0.2 3.2 4.6 1.4\n148 0.2 3.7 5.3 1.5\n149 0.2 3.3 5.0 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=46.46493418419044, gamma=0.1, probability=True, random_state=42)"}}
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
{"duration": 3.0411131381988525, "input_args": {"Data": " F1 F2 F3_l1p F4\n0 2.5 3.3 1.987874 6.0\n1 1.9 2.7 1.916923 5.1\n2 2.1 3.0 2.091864 5.9\n3 1.8 2.9 1.987874 5.6\n4 2.2 3.0 2.014903 5.8\n.. ... ... ... ...\n145 0.3 3.0 1.757858 1.4\n146 0.2 3.8 1.808289 1.6\n147 0.2 3.2 1.722767 1.4\n148 0.2 3.7 1.840550 1.5\n149 0.2 3.3 1.791759 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
@@ -0,0 +1 @@
{"duration": 8.3280029296875, "input_args": {"Data": " F1 F2 F3 F4\n0 2.5 3.3 6.3 6.0\n1 1.9 2.7 5.8 5.1\n2 2.1 3.0 7.1 5.9\n3 1.8 2.9 6.3 5.6\n4 2.2 3.0 6.5 5.8\n.. ... ... ... ...\n145 0.3 3.0 4.8 1.4\n146 0.2 3.8 5.1 1.6\n147 0.2 3.2 4.6 1.4\n148 0.2 3.7 5.3 1.5\n149 0.2 3.3 5.0 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=4.407539756085735, gamma=0.1935853267468035, probability=True,\n random_state=42)"}}
@@ -0,0 +1 @@
{"duration": 5.4526307582855225, "input_args": {"Data": " F1 F2_mms F3 F4\n0 2.5 0.541667 6.3 6.0\n1 1.9 0.291667 5.8 5.1\n2 2.1 0.416667 7.1 5.9\n3 1.8 0.375000 6.3 5.6\n4 2.2 0.416667 6.5 5.8\n.. ... ... ... ...\n145 0.3 0.416667 4.8 1.4\n146 0.2 0.750000 5.1 1.6\n147 0.2 0.500000 4.6 1.4\n148 0.2 0.708333 5.3 1.5\n149 0.2 0.541667 5.0 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
@@ -0,0 +1 @@
{"duration": 8.652288913726807, "input_args": {"Data": " F1 F2 F3 F4\n0 2.5 3.3 6.3 6.0\n1 1.9 2.7 5.8 5.1\n2 2.1 3.0 7.1 5.9\n3 1.8 2.9 6.3 5.6\n4 2.2 3.0 6.5 5.8\n.. ... ... ... ...\n145 0.3 3.0 4.8 1.4\n146 0.2 3.8 5.1 1.6\n147 0.2 3.2 4.6 1.4\n148 0.2 3.7 5.3 1.5\n149 0.2 3.3 5.0 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
@@ -0,0 +1 @@
{"duration": 3.5112879276275635, "input_args": {"Data": " F1 F3 F1+F3 |F1-F3| F1xF3 F1/F3 F3/F1\n0 2.5 6.3 8.8 3.8 15.75 0.396825 2.520000\n1 1.9 5.8 7.7 3.9 11.02 0.327586 3.052632\n2 2.1 7.1 9.2 5.0 14.91 0.295775 3.380952\n3 1.8 6.3 8.1 4.5 11.34 0.285714 3.500000\n4 2.2 6.5 8.7 4.3 14.30 0.338462 2.954545\n.. ... ... ... ... ... ... ...\n145 0.3 4.8 5.1 4.5 1.44 0.062500 16.000000\n146 0.2 5.1 5.3 4.9 1.02 0.039216 25.500000\n147 0.2 4.6 4.8 4.4 0.92 0.043478 23.000000\n148 0.2 5.3 5.5 5.1 1.06 0.037736 26.500000\n149 0.2 5.0 5.2 4.8 1.00 0.040000 25.000000\n\n[150 rows x 7 columns]", "clf": "SVC(C=44.03816499590499, gamma=0.01, probability=True, random_state=42)"}}
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
{"duration": 2.5880067348480225, "input_args": {"Data": " F1 F2 F3_l1p F4\n0 2.5 3.3 1.987874 6.0\n1 1.9 2.7 1.916923 5.1\n2 2.1 3.0 2.091864 5.9\n3 1.8 2.9 1.987874 5.6\n4 2.2 3.0 2.014903 5.8\n.. ... ... ... ...\n145 0.3 3.0 1.757858 1.4\n146 0.2 3.8 1.808289 1.6\n147 0.2 3.2 1.722767 1.4\n148 0.2 3.7 1.840550 1.5\n149 0.2 3.3 1.791759 1.4\n\n[150 rows x 4 columns]", "clf": "SVC(C=46.46493418419044, gamma=0.1, probability=True, random_state=42)"}}
@@ -0,0 +1,99 @@
# first line: 687
@memory.cache
def estimatorFeatureSelection(Data, clf):

    resultsFS = []
    permList = []
    PerFeatureAccuracy = []
    PerFeatureAccuracyAll = []
    ImpurityFS = []
    RankingFS = []

    rf = RandomForestClassifier(n_estimators = 100,
                                n_jobs = -1,
                                random_state = RANDOM_SEED)
    rf.fit(Data, yData)

    importances = rf.feature_importances_

    std = np.std([tree.feature_importances_ for tree in rf.estimators_],
                 axis=0)

    maxList = max(importances)
    minList = min(importances)

    for f in range(Data.shape[1]):
        ImpurityFS.append((importances[f] - minList) / (maxList - minList))

    estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)

    selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
    selector = selector.fit(Data, yData)
    RFEImp = selector.ranking_

    for f in range(Data.shape[1]):
        if (RFEImp[f] == 1):
            RankingFS.append(0.95)
        elif (RFEImp[f] == 2):
            RankingFS.append(0.85)
        elif (RFEImp[f] == 3):
            RankingFS.append(0.75)
        elif (RFEImp[f] == 4):
            RankingFS.append(0.65)
        elif (RFEImp[f] == 5):
            RankingFS.append(0.55)
        elif (RFEImp[f] == 6):
            RankingFS.append(0.45)
        elif (RFEImp[f] == 7):
            RankingFS.append(0.35)
        elif (RFEImp[f] == 8):
            RankingFS.append(0.25)
        elif (RFEImp[f] == 9):
            RankingFS.append(0.15)
        else:
            RankingFS.append(0.05)

    perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
    permList.append(perm.feature_importances_)
    n_feats = Data.shape[1]

    num_cores = multiprocessing.cpu_count()
    print("Parallelization Initialized")
    flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
    PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
    # for i in range(n_feats):
    #     scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
    #     PerFeatureAccuracy.append(scoresHere.mean())
    PerFeatureAccuracyAll.append(PerFeatureAccuracy)

    clf.fit(Data, yData)
    yPredict = clf.predict(Data)
    yPredict = np.nan_to_num(yPredict)

    RankingFSDF = pd.DataFrame(RankingFS)
    RankingFSDF = RankingFSDF.to_json()

    ImpurityFSDF = pd.DataFrame(ImpurityFS)
    ImpurityFSDF = ImpurityFSDF.to_json()

    perm_imp_eli5PD = pd.DataFrame(permList)
    perm_imp_eli5PD = perm_imp_eli5PD.to_json()

    PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
    PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()

    bestfeatures = SelectKBest(score_func=f_classif, k='all')
    fit = bestfeatures.fit(Data,yData)
    dfscores = pd.DataFrame(fit.scores_)
    dfcolumns = pd.DataFrame(Data.columns)
    featureScores = pd.concat([dfcolumns,dfscores],axis=1)
    featureScores.columns = ['Specs','Score']  # naming the dataframe columns
    featureScores = featureScores.to_json()

    resultsFS.append(featureScores)
    resultsFS.append(ImpurityFSDF)
    resultsFS.append(perm_imp_eli5PD)
    resultsFS.append(PerFeatureAccuracyPandas)
    resultsFS.append(RankingFSDF)

    return resultsFS
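In estimatorFeatureSelection above, the if/elif ladder maps RFECV ranks onto fixed weights: 0.95 down to 0.15 in steps of 0.10, with a 0.05 floor. A behavior-equivalent sketch (helper name illustrative, not from the commit):

    import numpy as np

    # Same mapping as the ladder above: ranks 1..9 -> 0.95, 0.85, ..., 0.15;
    # any rank of 10 or worse falls through to 0.05.
    def rfe_rank_to_weight(rank: int) -> float:
        return 1.05 - 0.1 * rank if 1 <= rank <= 9 else 0.05

    ranking = np.array([1, 3, 12, 2])  # e.g. an RFECV selector.ranking_
    RankingFS = [rfe_rank_to_weight(int(r)) for r in ranking]  # [0.95, 0.75, 0.05, 0.85]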
@@ -0,0 +1 @@
{"duration": 13.781832933425903, "input_args": {"exeCall": "[]", "flagEx": "0", "nodeTransfName": "''"}}
Binary file not shown.
@@ -1 +0,0 @@
{"duration": 7.80327582359314, "input_args": {}}
Binary file not shown.
@@ -1,15 +1,175 @@
-# first line: 457
+# first line: 473
 @memory.cache
-def executeModel():
-    create_global_function()
+# check this issue later because we are not getting the same results
+def executeModel(exeCall, flagEx, nodeTransfName):
+
+    global keyFirstTime
     global estimator
+    global yPredictProb
+    global scores
+    global featureImportanceData
+    global XData
+    global XDataStored
+    global previousState
+    global columnsNewGen
+    global columnsNames
+    global listofTransformations
+    global XDataStoredOriginal
+    global finalResultsData
+    columnsNames = []
+    scores = []
+
+    if (len(exeCall) == 0):
+        if (flagEx == 3):
+            XDataStored = XData.copy()
+        else:
+            XData = XDataStored.copy()
+            XDataStoredOriginal = XDataStored.copy()
+    else:
+        if (flagEx == 4):
+            XDataStored = XData.copy()
+        else:
+            XData = XDataStored.copy()
+            XDataStoredOriginal = XDataStored.copy()
+    columnsNewGen = keepOriginalFeatures.columns.values.tolist()
+    # Bayesian Optimization for 150 iterations
+    if (keyFirstTime):
+        create_global_function()
-    params = {"C": (0.0001, 10000), "gamma": (0.0001, 10000)}
-    svc_bayesopt = BayesianOptimization(estimator, params)
-    svc_bayesopt.maximize(init_points=10, n_iter=25, acq='ucb')
-    bestParams = svc_bayesopt.max['params']
-    estimator = SVC(C=bestParams.get('C'), gamma=bestParams.get('gamma'), probability=True)
+        params = {"C": (0.0001, 10000), "gamma": (0.0001, 10000)}
+        svc_bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
+        svc_bayesopt.maximize(init_points=130, n_iter=20, acq='ucb')
+        bestParams = svc_bayesopt.max['params']
+        estimator = SVC(C=bestParams.get('C'), gamma=bestParams.get('gamma'), probability=True, random_state=RANDOM_SEED)
+
+    if (len(exeCall) != 0):
+        if (flagEx == 1):
+            XData = XData.drop(XData.columns[exeCall], axis=1)
+            XDataStoredOriginal = XDataStoredOriginal.drop(XDataStoredOriginal.columns[exeCall], axis=1)
+        elif (flagEx == 2):
+            columnsKeepNew = []
+            columns = XDataGen.columns.values.tolist()
+            for indx, col in enumerate(columns):
+                if indx in exeCall:
+                    columnsKeepNew.append(col)
+                    columnsNewGen.append(col)
+
+            XDataTemp = XDataGen[columnsKeepNew]
+            XData[columnsKeepNew] = XDataTemp.values
+            XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
+        elif (flagEx == 4):
+            splittedCol = nodeTransfName.split('_')
+            XData.rename(columns={ XData.columns[exeCall[0]]: nodeTransfName }, inplace = True)
+            currentColumn = columnsNewGen[exeCall[0]]
+            subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
+            replacement = currentColumn.replace(subString, nodeTransfName)
+            storePositions.append(exeCall[0])
+            storeReplacements.append(replacement)
+            pos = 0
+            for repl in storeReplacements:
+                columnsNewGen[storePositions[pos]] = repl
+                pos += 1
+            if (len(splittedCol) == 1):
+                XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
+            else:
+                if (splittedCol[1] == 'r'):
+                    XData[nodeTransfName] = XData[nodeTransfName].round()
+                elif (splittedCol[1] == 'b'):
+                    number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
+                    emptyLabels = []
+                    for index, number in enumerate(number_of_bins):
+                        if (index == 0):
+                            pass
+                        else:
+                            emptyLabels.append(index)
+                    XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
+                    XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
+                elif (splittedCol[1] == 'zs'):
+                    zScore = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
+                    XData[nodeTransfName] = abs(zScore.min()) + zScore
+                elif (splittedCol[1] == 'mms'):
+                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
+                elif (splittedCol[1] == 'l2'):
+                    dfTemp = np.log10(XData[nodeTransfName])
+                    if (dfTemp < 0).values.any():
+                        XData[nodeTransfName] = abs(dfTemp.min()) + dfTemp
+                    else:
+                        XData[nodeTransfName] = dfTemp
+                elif (splittedCol[1] == 'l1p'):
+                    XData[nodeTransfName] = np.log1p(XData[nodeTransfName])
+                elif (splittedCol[1] == 'l10'):
+                    dfTemp = np.log10(XData[nodeTransfName])
+                    if (dfTemp < 0).values.any():
+                        XData[nodeTransfName] = abs(dfTemp.min()) + dfTemp
+                    else:
+                        XData[nodeTransfName] = dfTemp
+                elif (splittedCol[1] == 'e2'):
+                    XData[nodeTransfName] = np.exp2(XData[nodeTransfName])
+                elif (splittedCol[1] == 'em1'):
+                    XData[nodeTransfName] = np.expm1(XData[nodeTransfName])
+                elif (splittedCol[1] == 'p2'):
+                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
+                elif (splittedCol[1] == 'p3'):
+                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
+                else:
+                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
+            XDataStored = XData.copy()
+
+    columnsNamesLoc = XData.columns.values.tolist()
+
+    for col in columnsNamesLoc:
+        splittedCol = col.split('_')
+        if (len(splittedCol) == 1):
+            for tran in listofTransformations:
+                columnsNames.append(splittedCol[0]+'_'+tran)
+        else:
+            for tran in listofTransformations:
+                if (splittedCol[1] == tran):
+                    columnsNames.append(splittedCol[0])
+                else:
+                    columnsNames.append(splittedCol[0]+'_'+tran)
+    featureImportanceData = estimatorFeatureSelection(XData, estimator)
     estimator.fit(XData, yData)
     yPredict = estimator.predict(XData)
     yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
+    print(XData)
+    num_cores = multiprocessing.cpu_count()
+    inputsSc = ['accuracy','precision_macro','recall_macro']
+
+    flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
+    scoresAct = [item for sublist in flat_results for item in sublist]
+
+    howMany = 0
+
+    if (keyFirstTime):
+        previousState = scoresAct
+        keyFirstTime = False
+        howMany = 3
+
+    if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
+        finalResultsData = XData.copy()
+        print('improved')
+
+    if (keyFirstTime == False):
+        if ((scoresAct[0]-scoresAct[1]) > (previousState[0]-previousState[1])):
+            previousState[0] = scoresAct[0]
+            previousState[1] = scoresAct[1]
+            howMany = howMany + 1
+        elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
+            previousState[2] = scoresAct[2]
+            previousState[3] = scoresAct[3]
+            howMany = howMany + 1
+        elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
+            previousState[4] = scoresAct[4]
+            previousState[5] = scoresAct[5]
+            howMany = howMany + 1
+        else:
+            pass
+
+    scores = scoresAct + previousState
+
+    if (howMany == 3):
+        scores.append(1)
+    else:
+        scores.append(0)
+
+    return 'Everything Okay'
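The tuning block introduced above wraps an SVC inside bayes_opt. A self-contained sketch of that pattern, with the bounds, seed, and iteration counts taken from the diff; the objective function here is illustrative, since the commit builds its own via create_global_function(), and the acq='ucb' keyword matches the older bayes_opt API used in the diff (newer releases configure the acquisition function differently):

    import numpy as np
    from bayes_opt import BayesianOptimization
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC

    RANDOM_SEED = 42
    XData, yData = load_iris(return_X_y=True)

    def estimator(C, gamma):
        # Mean cross-validated accuracy is the quantity to maximize.
        clf = SVC(C=C, gamma=gamma, probability=True, random_state=RANDOM_SEED)
        return np.mean(cross_val_score(clf, XData, yData, cv=5))

    params = {"C": (0.0001, 10000), "gamma": (0.0001, 10000)}
    svc_bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
    # 130 random probes + 20 guided steps: the "150 iterations" the comment mentions.
    svc_bayesopt.maximize(init_points=130, n_iter=20, acq='ucb')
    best = svc_bayesopt.max['params']
    model = SVC(C=best['C'], gamma=best['gamma'], probability=True,
                random_state=RANDOM_SEED).fit(XData, yData)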
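The flagEx == 4 branch above dispatches on the column-name suffix (_mms, _zs, _l10, ...). A condensed, behavior-matching sketch of the numeric transforms (helper name illustrative; binning 'b' omitted; note the diff applies np.log10 for both the 'l2' and 'l10' suffixes):

    import numpy as np
    import pandas as pd

    # Condensed version of the suffix-keyed transforms in the diff above.
    def transform(col: pd.Series, suffix: str) -> pd.Series:
        if suffix == 'r':
            return col.round()
        if suffix == 'mms':                      # min-max scaling to [0, 1]
            return (col - col.min()) / (col.max() - col.min())
        if suffix == 'zs':                       # z-score, shifted non-negative
            z = (col - col.mean()) / col.std()
            return z + abs(z.min())
        if suffix in ('l2', 'l10'):              # diff uses log10 for both suffixes
            logged = np.log10(col)
            return logged + abs(logged.min()) if (logged < 0).any() else logged
        if suffix == 'l1p':
            return np.log1p(col)
        if suffix == 'e2':
            return np.exp2(col)
        if suffix == 'em1':
            return np.expm1(col)
        return col.pow({'p2': 2, 'p3': 3}.get(suffix, 4))  # powers; default is ^4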
@@ -0,0 +1,40 @@
<template>
  <div id="ExportResults">
    =======================================================
    <br>
    New features: {{ FeaturesPickled }}
    <br>
    =======================================================
  </div>
</template>

<script>

import { EventBus } from '../main.js'
import * as Cryo from 'cryo'
export default {
  name: 'Export',
  data () {
    return {
      FeaturesPickled: '',
      Features: [],
    }
  },
  methods: {
    Pickle () {
      this.FeaturesPickled = Cryo.stringify(this.Features)
    }
  },
  mounted () {
    EventBus.$on('sendSelectedFeaturestoPickle', data => {
      this.Features = data})
    EventBus.$on('sendSelectedFeaturestoPickle', this.Pickle)
  }
}
</script>

<style scoped>
#ExportResults {
  word-break: break-all !important;
}
</style>
@@ -0,0 +1,44 @@
<template>
  <h5><button class="btn-outline-custom"
    style="float: center;"
    id="know"
    v-on:click="knowClass">
    <font-awesome-icon icon="file-export" />
    {{ valueKnowE }}
  </button></h5>
</template>

<script>

import { EventBus } from '../main.js'

export default {
  name: 'Knowledge',
  data () {
    return {
      valueKnowE: 'Feature extraction'
    }
  },
  methods: {
    knowClass () {
      EventBus.$emit('OpenModal')
    }
  }
}
</script>

<style>

.btn-outline-custom {
  color: #B15928;
  background-color: #FFFFFF;
  border-color: #B15928;
}

.btn-outline-custom:hover {
  color: #FFFFFF;
  background-color: #B15928;
  border-color: #B15928;
}

</style>