fixed the crossover and mutation (CM) process

Branch: master
Parent: 3c669717e2
Commit: 585622e631
23 changed files (changed-line count or BIN, then path):
  1. BIN  __pycache__/run.cpython-37.pyc
  2. BIN  cachedir/joblib/run/randomSearch/0a7800f040057f3aab13a59ebd153437/output.pkl
  3. BIN  cachedir/joblib/run/randomSearch/15c1a1c6505bfd383b2231a984466199/output.pkl
  4. BIN  cachedir/joblib/run/randomSearch/33864c0fe6e12968caff785cfea6870b/output.pkl
  5. 1  cachedir/joblib/run/randomSearch/37316eae58a8c74b9165423658201b28/metadata.json
  6. 2  cachedir/joblib/run/randomSearch/8dd8c905a5a57cf1d9b467f291dd14b2/metadata.json
  7. 1  cachedir/joblib/run/randomSearch/8ee66eaa63ddb9f764dde4060234b1f9/metadata.json
  8. 1  cachedir/joblib/run/randomSearch/9d85ea1f37edb91533a828737a6caa01/metadata.json
  9. BIN  cachedir/joblib/run/randomSearch/a4fb34c288b3df59f6e95b0829e85cf8/output.pkl
  10. BIN  cachedir/joblib/run/randomSearch/b05bc4dd718bb370a35dbc60bdff86aa/output.pkl
  11. BIN  cachedir/joblib/run/randomSearch/b4f7364f1930f16773596d5d85ded668/output.pkl
  12. 1  cachedir/joblib/run/randomSearch/b7a3520563829a2c45169ede4c18dce6/metadata.json
  13. 1  cachedir/joblib/run/randomSearch/db2d0ed5e07bad06c380972b056d9c63/metadata.json
  14. 1  cachedir/joblib/run/randomSearch/e4dca276cc6a9475d2aee588523b478e/metadata.json
  15. 1  cachedir/joblib/run/randomSearch/eb45ad56de8e367dade6cb77617a3d36/metadata.json
  16. BIN  cachedir/joblib/run/randomSearch/f7b8e3b85d52717cbc9c9dd04277903e/output.pkl
  17. 2  cachedir/joblib/run/randomSearch/func_code.py
  18. 2  frontend/src/components/Ensemble.vue
  19. 2  frontend/src/components/HyperParameterSpace.vue
  20. 3  frontend/src/components/Main.vue
  21. 3  frontend/src/components/Predictions.vue
  22. 4  frontend/src/components/PredictionsCM.vue
  23. 103  run.py

Binary file not shown.

@@ -1 +0,0 @@
{"duration": 7.371565103530884, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n298 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n299 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n300 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n301 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n302 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='kd_tree', leaf_size=30, metric='chebyshev',\n metric_params=None, n_jobs=None, n_neighbors=17, p=2,\n weights='uniform')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -1 +1 @@
{"duration": 25.273269653320312, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n298 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n299 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n300 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n301 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n302 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "LogisticRegression(C=21, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=450,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=42, solver='sag', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "100"}}
{"duration": 24.977500915527344, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n298 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n299 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n300 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n301 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n302 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "LogisticRegression(C=51, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=250,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=42, solver='newton-cg', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "100"}}

@@ -0,0 +1 @@
{"duration": 12.29799771308899, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n601 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n602 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n603 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n604 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n605 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[606 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='brute', leaf_size=30, metric='manhattan',\n metric_params=None, n_jobs=None, n_neighbors=73, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -1 +0,0 @@
{"duration": 12.543240070343018, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "KNeighborsClassifier(algorithm='ball_tree', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=82, p=2,\n weights='uniform')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -0,0 +1 @@
{"duration": 7.2485480308532715, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n298 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n299 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n300 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n301 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n302 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='kd_tree', leaf_size=30, metric='chebyshev',\n metric_params=None, n_jobs=None, n_neighbors=26, p=2,\n weights='uniform')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -0,0 +1 @@
{"duration": 31.870266675949097, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n601 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n602 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n603 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n604 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n605 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[606 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "LogisticRegression(C=48, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=42, solver='saga', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "200"}}

@@ -0,0 +1 @@
{"duration": 9.726292848587036, "input_args": {"XData": " Age Sex Cp Trestbps Chol Fbs Restecg Thalach Exang Oldpeak Slope Ca Thal\n0 63 1 3 145 233 1 0 150 0 2.3 0 0 1\n1 37 1 2 130 250 0 1 187 0 3.5 0 0 2\n2 41 0 1 130 204 0 0 172 0 1.4 2 0 2\n3 56 1 1 120 236 0 1 178 0 0.8 2 0 2\n4 57 0 0 120 354 0 1 163 1 0.6 2 0 2\n.. ... ... .. ... ... ... ... ... ... ... ... .. ...\n601 57 0 0 140 241 0 1 123 1 0.2 1 0 3\n602 45 1 3 110 264 0 1 132 0 1.2 1 0 3\n603 68 1 0 144 193 1 1 141 0 3.4 1 2 3\n604 57 1 0 130 131 0 1 115 1 1.2 1 1 3\n605 57 0 1 130 236 0 0 174 0 0.0 1 1 2\n\n[606 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='kd_tree', leaf_size=30, metric='chebyshev',\n metric_params=None, n_jobs=None, n_neighbors=17, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "100"}}

@@ -1 +0,0 @@
{"duration": 19.771310806274414, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LogisticRegression(C=64, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=300,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=42, solver='newton-cg', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "100"}}

@@ -1,4 +1,4 @@
# first line: 501
# first line: 518
@memory.cache
def randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
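These two bookkeeping files come from joblib's disk cache: func_code.py records the cached function's source together with its first line number (hence the 501 to 518 change when randomSearch moved inside run.py), and each cachedir/joblib/run/randomSearch/<hash>/ directory holds an output.pkl plus the metadata.json blobs shown above. A minimal sketch of that caching pattern, with randomSearch_demo as a stand-in rather than the project's function:

from joblib import Memory

memory = Memory('cachedir', verbose=0)   # same cache root as in this repository

@memory.cache
def randomSearch_demo(a, b):
    # joblib hashes the arguments; every new combination creates a new
    # <hash>/output.pkl and <hash>/metadata.json under cachedir/joblib/
    return a + b

randomSearch_demo(1, 2)   # computed and written to the cache
randomSearch_demo(1, 2)   # the repeated call is served from the cache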

@@ -89,7 +89,7 @@ export default {
this.clean(parameters[i])
stringParameters.push(JSON.stringify(parameters[i]).replace(/,/gi, '<br>'))
}
// fix that!
var classifiersInfoProcessing = []
for (let i = 0; i < modelId.length; i++) {
if (i < 100) {

@@ -73,7 +73,7 @@ export default {
this.clean(parameters[i])
stringParameters.push(JSON.stringify(parameters[i]).replace(/,/gi, '<br>'))
}
// fix that!
var classifiersInfoProcessing = []
for (let i = 0; i < modelId.length; i++) {
if (i < 100) {

@@ -323,7 +323,7 @@ export default Vue.extend({
} else {
EventBus.$emit('emittedEventCallingCrossoverMutation', this.OverviewResults)
EventBus.$emit('emittedEventCallingGridCrossoverMutation', this.OverviewResults)
EventBus.$emit('emittedEventCallingGridSelectionGridCrossoverMutation', this.OverviewResults)
EventBus.$emit('emittedEventCallingGridSelectionCrossoverMutation', this.OverviewResults)
//this.getFinalResults()
}
})
@@ -349,7 +349,6 @@ export default Vue.extend({
EventBus.$emit('emittedEventCallingScatterPlot', this.OverviewResultsCM)
EventBus.$emit('emittedEventCallingGrid', this.OverviewResultsCM)
EventBus.$emit('emittedEventCallingGridSelection', this.OverviewResultsCM)
//this.getFinalResults()
})
.catch(error => {
console.log(error)

@@ -61,6 +61,7 @@ export default {
var KNNPred = predictions[0]
var LRPred = predictions[1]
var PredAver = predictions[2]
console.log(PredAver)
var dataAver = []
var dataAverGetResults = []
@@ -124,7 +125,7 @@ export default {
}
var classStore = [].concat.apply([], classArray);
console.log(classStore)
// === Set up canvas === //
var width = 1200,

@@ -431,8 +431,8 @@ export default {
EventBus.$on('emittedEventCallingGridCrossoverMutation', data => { this.GetResultsAllCM = data; })
EventBus.$on('emittedEventCallingGridCrossoverMutation', this.Grid)
EventBus.$on('emittedEventCallingGridSelectionGridCrossoverMutation', data => { this.GetResultsSelectionCM = data; })
EventBus.$on('emittedEventCallingGridSelectionGridCrossoverMutation', this.GridSelection)
EventBus.$on('emittedEventCallingGridSelectionCrossoverMutation', data => { this.GetResultsSelectionCM = data; })
EventBus.$on('emittedEventCallingGridSelectionCrossoverMutation', this.GridSelection)
EventBus.$on('SendSelectedPointsToServerEventCM', data => { this.predictSelectionCM = data; })
EventBus.$on('SendSelectedPointsToServerEventCM', this.GridSelection)

103  run.py

@@ -76,6 +76,15 @@ def reset():
global yData
yData = []
global addKNN
addKNN = 0
global addLR
addLR = 100
global countAllModels
countAllModels = 0
global XDataStored
XDataStored = []
global yDataStored
@@ -197,6 +206,12 @@ def retrieveFileName():
global detailsParams
detailsParams = []
global addKNN
addKNN = 0
global addLR
addLR = 100
# Initializing models
global RetrieveModelsList
@@ -475,6 +490,7 @@ def retrieveModel():
global XData
global yData
global LRModelsCount
global countAllModels
# loop through the algorithms
global allParametersPerformancePerModel
@@ -484,11 +500,12 @@ def retrieveModel():
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 100)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = KNNModelsCount
AlgorithmsIDsEnd = countAllModels
else:
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(1,100,1)), 'max_iter': list(np.arange(50,500,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
AlgorithmsIDsEnd = countAllModels
countAllModels = countAllModels + 100
allParametersPerformancePerModel = randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd)
HistoryPreservation = allParametersPerformancePerModel.copy()
# call the function that sends the results to the frontend
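This hunk replaces the hard-coded per-algorithm bases (KNNModelsCount, LRModelsCount) with a single countAllModels counter that advances by 100 after each random-search batch, so every batch gets its own ID range. A minimal sketch of that numbering scheme, with next_id_block as a hypothetical helper:

countAllModels = 0   # global running counter, as introduced in run.py

def next_id_block(batch_size=100):
    # Reserve a contiguous block of model IDs and advance the shared counter.
    global countAllModels
    start = countAllModels
    countAllModels += batch_size
    return start

knn_base = next_id_block()   # 0   -> KNN models are numbered from 0
lr_base = next_id_block()    # 100 -> LR models are numbered from 100
print(knn_base, lr_base)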
@@ -652,6 +669,7 @@ def PreprocessingParam():
dfLR = dfLR.T
df_params = pd.concat([dfKNN, dfLR])
df_params = df_params.reset_index(drop=True)
return df_params
def PreprocessingParamSep():
@@ -788,7 +806,6 @@ def returnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionProbSel)
Results.append(json.dumps(ModelSpaceTSNE))
Results.append(json.dumps(ModelSpaceUMAP))
Results.append(json.dumps(PredictionProbSel))
print('mpike')
return Results
@@ -806,6 +823,9 @@ def CrossoverMutateFun():
global XData
global yData
global LRModelsCount
global addKNN
global addLR
global countAllModels
# loop through the algorithms
global allParametersPerfCrossMutr
@@ -819,17 +839,16 @@ def CrossoverMutateFun():
countLR = 0
setMaxLoopValue = 5
paramAllAlgs = PreprocessingParam()
KNNIntIndex = []
LRIntIndex = []
localCrossMutr = []
allParametersPerfCrossMutrKNNC = []
while countKNN < setMaxLoopValue:
for dr in KNNIDs:
KNNIntIndex.append(int(re.findall('\d+', dr)[0]))
KNNPickPair = random.sample(KNNIntIndex,2)
pairDF = paramAllAlgs.iloc[KNNPickPair]
crossoverDF = pd.DataFrame()
for column in pairDF:
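The body of the crossover loop is not shown in this hunk; a plausible minimal sketch, assuming each child hyperparameter is drawn at random from one of the two sampled parents (the parent values below are illustrative):

import random
import pandas as pd

# Two parent configurations, standing in for pairDF = paramAllAlgs.iloc[KNNPickPair]
pairDF = pd.DataFrame([
    {'n_neighbors': 17, 'metric': 'chebyshev', 'algorithm': 'kd_tree', 'weights': 'uniform'},
    {'n_neighbors': 73, 'metric': 'manhattan', 'algorithm': 'brute', 'weights': 'distance'},
])

crossoverDF = pd.DataFrame()
for column in pairDF:
    # column-wise crossover: take each hyperparameter from a randomly chosen parent
    crossoverDF[column] = [random.choice(pairDF[column].tolist())]
print(crossoverDF)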
@@ -843,11 +862,13 @@ def CrossoverMutateFun():
else:
clf = KNeighborsClassifier()
params = {'n_neighbors': [crossoverDF['n_neighbors'].iloc[0]], 'metric': [crossoverDF['metric'].iloc[0]], 'algorithm': [crossoverDF['algorithm'].iloc[0]], 'weights': [crossoverDF['weights'].iloc[0]]}
AlgorithmsIDsEnd = 200 + countKNN
AlgorithmsIDsEnd = countAllModels + countKNN
localCrossMutr = crossoverMutation(XData, yData, clf, params, 'KNN', AlgorithmsIDsEnd)
countKNN += 1
crossoverDF = pd.DataFrame()
countAllModels = countAllModels + 5
for loop in range(setMaxLoopValue - 1):
localCrossMutr[0] = localCrossMutr[0] + localCrossMutr[(loop+1)*4]
localCrossMutr[1] = pd.concat([localCrossMutr[1], localCrossMutr[(loop+1)*4+1]], ignore_index=True)
@@ -888,11 +909,13 @@ def CrossoverMutateFun():
else:
clf = KNeighborsClassifier()
params = {'n_neighbors': [crossoverDF['n_neighbors'].iloc[0]], 'metric': [crossoverDF['metric'].iloc[0]], 'algorithm': [crossoverDF['algorithm'].iloc[0]], 'weights': [crossoverDF['weights'].iloc[0]]}
AlgorithmsIDsEnd = 205 + countKNN
AlgorithmsIDsEnd = countAllModels + countKNN
localCrossMutr = crossoverMutation(XData, yData, clf, params, 'KNN', AlgorithmsIDsEnd)
countKNN += 1
crossoverDF = pd.DataFrame()
countAllModels = countAllModels + 5
for loop in range(setMaxLoopValue - 1):
localCrossMutr[0] = localCrossMutr[0] + localCrossMutr[(loop+1)*4]
localCrossMutr[1] = pd.concat([localCrossMutr[1], localCrossMutr[(loop+1)*4+1]], ignore_index=True)
@@ -927,11 +950,13 @@ def CrossoverMutateFun():
else:
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': [crossoverDF['C'].iloc[0]], 'max_iter': [crossoverDF['max_iter'].iloc[0]], 'solver': [crossoverDF['solver'].iloc[0]], 'penalty': [crossoverDF['penalty'].iloc[0]]}
AlgorithmsIDsEnd = 210 + countLR
AlgorithmsIDsEnd = countAllModels + countLR
localCrossMutr = crossoverMutation(XData, yData, clf, params, 'LR', AlgorithmsIDsEnd)
countLR += 1
crossoverDF = pd.DataFrame()
countAllModels = countAllModels + 5
for loop in range(setMaxLoopValue - 1):
localCrossMutr[0] = localCrossMutr[0] + localCrossMutr[(loop+1)*4]
localCrossMutr[1] = pd.concat([localCrossMutr[1], localCrossMutr[(loop+1)*4+1]], ignore_index=True)
@@ -972,11 +997,13 @@ def CrossoverMutateFun():
else:
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': [crossoverDF['C'].iloc[0]], 'max_iter': [crossoverDF['max_iter'].iloc[0]], 'solver': [crossoverDF['solver'].iloc[0]], 'penalty': [crossoverDF['penalty'].iloc[0]]}
AlgorithmsIDsEnd = 215 + countLR
AlgorithmsIDsEnd = countAllModels + countLR
localCrossMutr = crossoverMutation(XData, yData, clf, params, 'LR', AlgorithmsIDsEnd)
countLR += 1
crossoverDF = pd.DataFrame()
countAllModels = countAllModels + 5
for loop in range(setMaxLoopValue - 1):
localCrossMutr[0] = localCrossMutr[0] + localCrossMutr[(loop+1)*4]
localCrossMutr[1] = pd.concat([localCrossMutr[1], localCrossMutr[(loop+1)*4+1]], ignore_index=True)
@@ -990,15 +1017,15 @@ def CrossoverMutateFun():
HistoryPreservation = HistoryPreservation + allParametersPerfCrossMutrLRM
localCrossMutr.clear()
allParametersPerfCrossMutr = allParametersPerfCrossMutrKNNC + allParametersPerfCrossMutrKNNM + allParametersPerfCrossMutrLRC + allParametersPerfCrossMutrLRM
allParametersPerformancePerModel[0] = allParametersPerformancePerModel[0] + allParametersPerfCrossMutrKNNC[0] + allParametersPerfCrossMutrKNNM[0]
allParametersPerformancePerModel[1] = pd.concat([allParametersPerformancePerModel[1], allParametersPerfCrossMutrKNNC[1]], ignore_index=True)
allParametersPerformancePerModel[1] = pd.concat([allParametersPerformancePerModel[1], allParametersPerfCrossMutrKNNM[1]], ignore_index=True)
allParametersPerformancePerModel[2] = pd.concat([allParametersPerformancePerModel[2], allParametersPerfCrossMutrKNNC[2]], ignore_index=True)
allParametersPerformancePerModel[2] = pd.concat([allParametersPerformancePerModel[2], allParametersPerfCrossMutrKNNM[2]], ignore_index=True)
allParametersPerformancePerModel[4] = allParametersPerformancePerModel[4] + allParametersPerfCrossMutrKNNC[0] + allParametersPerfCrossMutrKNNM[0]
allParametersPerformancePerModel[5] = pd.concat([allParametersPerformancePerModel[5], allParametersPerfCrossMutrKNNC[1]], ignore_index=True)
allParametersPerformancePerModel[5] = pd.concat([allParametersPerformancePerModel[5], allParametersPerfCrossMutrKNNM[1]], ignore_index=True)
allParametersPerformancePerModel[6] = pd.concat([allParametersPerformancePerModel[6], allParametersPerfCrossMutrKNNC[2]], ignore_index=True)
allParametersPerformancePerModel[6] = pd.concat([allParametersPerformancePerModel[6], allParametersPerfCrossMutrKNNM[2]], ignore_index=True)
allParametersPerformancePerModel[3] = pd.concat([allParametersPerformancePerModel[3], allParametersPerfCrossMutrKNNC[3]], ignore_index=True)
allParametersPerformancePerModel[3] = pd.concat([allParametersPerformancePerModel[3], allParametersPerfCrossMutrKNNM[3]], ignore_index=True)
@@ -1008,14 +1035,15 @@ def CrossoverMutateFun():
allParametersPerformancePerModel[5] = pd.concat([allParametersPerformancePerModel[5], allParametersPerfCrossMutrLRC[1]], ignore_index=True)
allParametersPerformancePerModel[5] = pd.concat([allParametersPerformancePerModel[5], allParametersPerfCrossMutrLRM[1]], ignore_index=True)
allParametersPerformancePerModel[6] = pd.concat([allParametersPerformancePerModel[6], allParametersPerfCrossMutrLRC[2]], ignore_index=True)
allParametersPerformancePerModel[6] = pd.concat([allParametersPerformancePerModel[6], allParametersPerfCrossMutrLRM[2]], ignore_index=True)
allParametersPerformancePerModel[7] = pd.concat([allParametersPerformancePerModel[7], allParametersPerfCrossMutrLRC[3]], ignore_index=True)
allParametersPerformancePerModel[7] = pd.concat([allParametersPerformancePerModel[7], allParametersPerfCrossMutrLRM[3]], ignore_index=True)
print(allParametersPerformancePerModel[7])
addKNN = addLR
addLR = addLR + 10
# KNNIntIndex = []
# for dr in KNNIDs:
@@ -1038,7 +1066,6 @@ def CrossoverMutateFun():
return 'Everything Okay'
def crossoverMutation(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
search = GridSearchCV(
estimator=clf, param_grid=params, cv=crossValidation, refit='accuracy',
scoring=scoring, verbose=0, n_jobs=-1)
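crossoverMutation evaluates each crossed-over or mutated configuration through GridSearchCV, with every hyperparameter wrapped in a single-value list so the "grid" contains exactly one candidate. A self-contained sketch of that call shape on a stand-in dataset (iris, not the project's data):

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)

# Single-value lists: GridSearchCV cross-validates exactly one configuration.
params = {'n_neighbors': [17], 'metric': ['chebyshev'],
          'algorithm': ['kd_tree'], 'weights': ['uniform']}
search = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params,
                      cv=5, scoring='accuracy', n_jobs=-1)
search.fit(X, y)
print(search.best_params_, search.best_score_)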
@@ -1134,7 +1161,6 @@ def PreprocessingIDsCM():
dicLRM = allParametersPerfCrossMutr[12]
df_concatIDs = dicKNNC + dicKNNM + dicLRC + dicLRM
return df_concatIDs
def PreprocessingMetricsCM():
@@ -1163,13 +1189,28 @@ def PreprocessingPredCM():
dfLRC = pd.DataFrame.from_dict(dicLRC)
dfLRM = pd.DataFrame.from_dict(dicLRM)
dfKNN = pd.concat([dfKNNC, dfKNNM])
dfLR = pd.concat([dfLRC, dfLRM])
df_concatProbs = pd.concat([dfKNNC, dfKNNM, dfLRC, dfLRM])
predictionsKNN = []
for column, content in dfKNN.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsKNN.append(el)
predictionsLR = []
for column, content in dfLR.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
return [predictionsKNN, predictionsLR, predictions]
def PreprocessingParamCM():
dicKNNC = allParametersPerfCrossMutr[1]
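The [sum(x)/len(x) for x in zip(*content)] idiom used in PreprocessingPredCM above (and again in PreprocessingPredSel further down) averages each sample's predicted probability across models; a tiny standalone example with made-up numbers:

# Each inner list: one model's class-1 probability for three samples.
per_model_probs = [
    [0.75, 0.25, 0.50],
    [0.25, 0.25, 1.00],
]
averaged = [sum(x) / len(x) for x in zip(*per_model_probs)]
print(averaged)   # [0.5, 0.25, 0.75]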
@@ -1198,6 +1239,7 @@ def PreprocessingParamCM():
dfLRM = dfLRM.T
df_params = pd.concat([dfKNNC, dfKNNM, dfLRC, dfLRM])
df_params = df_params.reset_index(drop=True)
return df_params
def PreprocessingParamSepCM():
@@ -1326,30 +1368,37 @@ def CrossMutateResults(ModelSpaceMDSCM,ModelSpaceTSNECM,ModelSpaceUMAPCM,Predict
ResultsCM.append(json.dumps(ModelSpaceUMAPCM))
ResultsCM.append(json.dumps(PredictionProbSel))
return Results
return ResultsCM
def PreprocessingPredSel(SelectedIDs):
global addKNN
global addLR
numberIDKNN = []
numberIDLR = []
print(SelectedIDs)
for el in SelectedIDs:
match = re.match(r"([a-z]+)([0-9]+)", el, re.I)
if match:
items = match.groups()
if (items[0] == 'KNN'):
numberIDKNN.append(int(items[1]))
numberIDKNN.append(int(items[1]) - addKNN)
else:
numberIDLR.append(int(items[1]) - 100)
numberIDLR.append(int(items[1]) - addLR)
print(numberIDKNN)
dicKNN = allParametersPerformancePerModel[3]
dicLR = allParametersPerformancePerModel[7]
dfKNN = pd.DataFrame.from_dict(dicKNN)
print(dfKNN)
dfKNN = dfKNN.loc[numberIDKNN]
dfLR = pd.DataFrame.from_dict(dicLR)
dfLR = dfLR.loc[numberIDLR]
dfLR.index += 100
print(dfLR)
dfLR.index += addKNN
df_concatProbs = pd.concat([dfKNN, dfLR])
print(df_concatProbs)
predictionsKNN = []
for column, content in dfKNN.items():
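PreprocessingPredSel now subtracts the addKNN/addLR offsets instead of a hard-coded 100, so the numeric part of a selected model ID maps back onto that algorithm's own row index even after crossover/mutation batches shift the ranges. A minimal sketch of the parsing, using the initial offsets set in reset(); the selected IDs below are hypothetical:

import re

addKNN, addLR = 0, 100   # initial offsets; run.py advances them as CM batches are added

numberIDKNN, numberIDLR = [], []
for el in ['KNN12', 'LR112', 'KNN3']:   # hypothetical selected model IDs
    match = re.match(r"([a-z]+)([0-9]+)", el, re.I)
    if match:
        prefix, number = match.groups()
        if prefix == 'KNN':
            numberIDKNN.append(int(number) - addKNN)
        else:
            numberIDLR.append(int(number) - addLR)

print(numberIDKNN, numberIDLR)   # [12, 3] [12]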
@@ -1360,6 +1409,7 @@ def PreprocessingPredSel(SelectedIDs):
for column, content in dfLR.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
@@ -1375,6 +1425,7 @@ def RetrieveSelIDsPredict():
RetrieveIDsSelection = request.get_data().decode('utf8').replace("'", '"')
RetrieveIDsSelection = json.loads(RetrieveIDsSelection)
RetrieveIDsSelection = RetrieveIDsSelection['predictSelectionIDs']
ResultsSelPred = PreprocessingPredSel(RetrieveIDsSelection)
return 'Everything Okay'
