Updated models, RandomSearch CV, and metrics

master
Angelos Chatzimparmpas 4 years ago
parent 30a2ac5bbe
commit 03deab186f
  1. BIN  __pycache__/run.cpython-37.pyc
  2. BIN  cachedir/joblib/run/randomSearch/131a748ed0f5d243f0ea93c7de3fa285/output.pkl
  3. BIN  cachedir/joblib/run/randomSearch/1c55d588f2a725cacd0db75efbba66db/output.pkl
  4. 1    cachedir/joblib/run/randomSearch/2352a49cf5a0ab5a678f59beb67dccd8/metadata.json
  5. BIN  cachedir/joblib/run/randomSearch/5b038325dd2fce24b03707e069cb9fec/output.pkl
  6. 2    cachedir/joblib/run/randomSearch/72f7fa02379dfc92d4c664b6c36e5abb/metadata.json
  7. 1    cachedir/joblib/run/randomSearch/7b7c4f46bd8bbecee35ec82369081165/metadata.json
  8. 2    cachedir/joblib/run/randomSearch/9536a6cfa3884d632815dbf101fc6cc0/metadata.json
  9. 2    cachedir/joblib/run/randomSearch/a72b281c2de36e337e70607faf20f922/metadata.json
  10. BIN  cachedir/joblib/run/randomSearch/b2eaebae0c8bac1326b354c9349fb8fc/output.pkl
  11. BIN  cachedir/joblib/run/randomSearch/bc948e5112c7458861cc66c261433272/output.pkl
  12. 1    cachedir/joblib/run/randomSearch/f2dc1dfd6f43b37cdcbd71a6de0ceb80/metadata.json
  13. BIN  cachedir/joblib/run/randomSearch/f94f6f45ec13ac95464d12dacae2049a/output.pkl
  14. 1    cachedir/joblib/run/randomSearch/fb4e2f9b18c63f6d1b0b6bc39f3853cb/metadata.json
  15. 4    cachedir/joblib/run/randomSearch/func_code.py
  16. 22   frontend/package-lock.json
  17. 1    frontend/package.json
  18. 9    frontend/src/components/DataSetExecController.vue
  19. 2    frontend/src/components/Ensemble.vue
  20. 99   frontend/src/components/GlobalParamController.vue
  21. 2    frontend/src/components/HyperParameterSpace.vue
  22. 48   frontend/src/components/Main.vue
  23. 54   frontend/src/components/PerformanceMetrics.vue
  24. 130  frontend/src/components/Predictions.vue
  25. 297  run.py

Binary file not shown.

@ -1 +0,0 @@
{"duration": 236.167307138443, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "MLPClassifier(alpha=0.00021, hidden_layer_sizes=(113, 2), max_iter=100,\n random_state=42, solver='sgd', tol=0.00041000000000000005)", "params": "{'hidden_layer_sizes': [(60, 2), (61, 1), (62, 2), (63, 3), (64, 3), (65, 1), (66, 2), (67, 2), (68, 1), (69, 2), (70, 2), (71, 3), (72, 2), (73, 3), (74, 2), (75, 1), (76, 2), (77, 2), (78, 1), (79, 3), (80, 1), (81, 3), (82, 3), (83, 1), (84, 2), (85, 1), (86, 1), (87, 2), (88, 3), (89, 2), (90, 2), (91, 3), (92, 1), (93, 1), (94, 1), (95, 1), (96, 2), (97, 1), (98, 3), (99, 2), (100, 1), (101, 3), (102, 1), (103, 3), (104, 3), (105, 1), (106, 1), (107, 1), (108, 3), (109, 1), (110, 1), (111, 3), (112, 2), (113, 2), (114, 1), (115, 2), (116, 2), (117, 3), (118, 3), (119, 2)], 'alpha': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001], 'tol': [1e-05, 0.00041000000000000005, 0.0008100000000000001], 'max_iter': [100], 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver': ['adam', 'sgd']}", "eachAlgor": "'MLP'", "AlgorithmsIDsEnd": "200"}}

@ -1 +1 @@
{"duration": 23.21109628677368, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='ball_tree', metric='manhattan', n_neighbors=57,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}
{"duration": 329.0160319805145, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "GradientBoostingClassifier(criterion='mse', learning_rate=0.01, n_estimators=82,\n random_state=42)", "params": "{'n_estimators': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'learning_rate': [0.01, 0.12], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GradB'", "AlgorithmsIDsEnd": "400"}}

@ -0,0 +1 @@
{"duration": 262.5104908943176, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "RandomForestClassifier(criterion='entropy', n_estimators=96, random_state=42)", "params": "{'n_estimators': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "300"}}

@ -1 +1 @@
{"duration": 167.10102701187134, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "GradientBoostingClassifier(criterion='mse', learning_rate=0.12, n_estimators=80,\n random_state=42)", "params": "{'n_estimators': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'learning_rate': [0.01, 0.12], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GradB'", "AlgorithmsIDsEnd": "400"}}
{"duration": 22.592421054840088, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='ball_tree', metric='chebyshev', n_neighbors=66)", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@ -1 +1 @@
{"duration": 175.59277415275574, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "RandomForestClassifier(criterion='entropy', n_estimators=67, random_state=42)", "params": "{'n_estimators': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "300"}}
{"duration": 224.38773703575134, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "MLPClassifier(alpha=0.00021, hidden_layer_sizes=(90, 2), max_iter=100,\n random_state=42, tol=0.0008100000000000001)", "params": "{'hidden_layer_sizes': [(60, 3), (61, 1), (62, 1), (63, 3), (64, 2), (65, 1), (66, 1), (67, 1), (68, 3), (69, 1), (70, 3), (71, 3), (72, 3), (73, 1), (74, 3), (75, 2), (76, 1), (77, 1), (78, 1), (79, 1), (80, 1), (81, 3), (82, 3), (83, 1), (84, 3), (85, 1), (86, 3), (87, 3), (88, 3), (89, 3), (90, 2), (91, 1), (92, 2), (93, 3), (94, 2), (95, 1), (96, 1), (97, 3), (98, 2), (99, 2), (100, 2), (101, 1), (102, 1), (103, 2), (104, 1), (105, 1), (106, 2), (107, 1), (108, 2), (109, 2), (110, 3), (111, 2), (112, 1), (113, 3), (114, 2), (115, 3), (116, 1), (117, 2), (118, 1), (119, 3)], 'alpha': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001], 'tol': [1e-05, 0.00041000000000000005, 0.0008100000000000001], 'max_iter': [100], 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver': ['adam', 'sgd']}", "eachAlgor": "'MLP'", "AlgorithmsIDsEnd": "200"}}

@ -0,0 +1 @@
{"duration": 72.83721280097961, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "LogisticRegression(C=28, max_iter=350, penalty='none', random_state=42,\n solver='saga')", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "100"}}

@ -1 +0,0 @@
{"duration": 93.8413097858429, "input_args": {"XData": " Fbs Slope Trestbps Exang Thalach Age Chol Sex Oldpeak Restecg Cp Ca Thal\n0 1 0 145 0 150 63 233 1 2.3 0 3 0 1\n1 0 0 130 0 187 37 250 1 3.5 1 2 0 2\n2 0 2 130 0 172 41 204 0 1.4 0 1 0 2\n3 0 2 120 0 178 56 236 1 0.8 1 1 0 2\n4 0 2 120 1 163 57 354 0 0.6 1 0 0 2\n.. ... ... ... ... ... ... ... ... ... ... .. .. ...\n298 0 1 140 1 123 57 241 0 0.2 1 0 0 3\n299 0 1 110 0 132 45 264 1 1.2 1 3 0 3\n300 1 1 144 0 141 68 193 1 3.4 1 0 2 3\n301 0 1 130 1 115 57 131 1 1.2 1 0 1 3\n302 0 1 130 0 174 57 236 0 0.0 0 1 1 2\n\n[303 rows x 13 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "LogisticRegression(C=15, max_iter=400, random_state=42)", "params": "{'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], 'max_iter': [50, 100, 150, 200, 250, 300, 350, 400, 450], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "100"}}

@ -1,9 +1,7 @@
# first line: 547
# first line: 548
@memory.cache
def randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
print('inside')
search = RandomizedSearchCV(
estimator=clf, param_distributions=params, n_iter=100,
cv=crossValidation, refit='accuracy', scoring=scoring,
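The hunk above only shows the head of the cached `randomSearch` helper (this `func_code.py` is the cache key joblib stores next to the pickled outputs in `cachedir`). As a rough, self-contained sketch of the same pattern, with a toy dataset and search space standing in for the real inputs:

```python
# Sketch of the joblib-cached random-search pattern from the hunk above.
# The real function also collects per-model metrics and class probabilities;
# here only cv_results_ is returned, and the dataset/search space are toys.
from joblib import Memory
from sklearn.datasets import load_iris
from sklearn.model_selection import RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier

memory = Memory('./cachedir', verbose=0)

crossValidation = 5
scoring = {'accuracy': 'accuracy', 'precision_weighted': 'precision_weighted'}

@memory.cache  # repeated calls with identical arguments are read back from disk
def randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
    search = RandomizedSearchCV(
        estimator=clf, param_distributions=params, n_iter=100,
        cv=crossValidation, refit='accuracy', scoring=scoring,
        random_state=42, n_jobs=-1)
    search.fit(XData, yData)
    return search.cv_results_

X, y = load_iris(return_X_y=True)
params = {'n_neighbors': list(range(1, 100)), 'weights': ['uniform', 'distance']}
results = randomSearch(X, y, KNeighborsClassifier(), params, 'KNN', 0)
```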

@ -9418,6 +9418,28 @@
"d3-path": "1"
}
},
"d3-simple-slider": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/d3-simple-slider/-/d3-simple-slider-1.9.0.tgz",
"integrity": "sha512-gshuzXjoX5mpeuMfAMcHc86/XTL3lvKpepVKwnwozRrnPvM0Zw8uE+VQeIfLbxbtxiC424DXeEWtchO8hpPJ1w==",
"requires": {
"d3-array": "^1.0.0",
"d3-axis": "^1.0.8",
"d3-dispatch": "^1.0.0",
"d3-drag": "^1.0.0",
"d3-ease": "^1.0.0",
"d3-scale": "^2.0.0",
"d3-selection": "^1.0.0",
"d3-transition": "^1.0.0"
},
"dependencies": {
"d3-array": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
"integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw=="
}
}
},
"d3-svg-legend": {
"version": "1.13.0",
"resolved": "https://registry.npmjs.org/d3-svg-legend/-/d3-svg-legend-1.13.0.tgz",

@ -43,6 +43,7 @@
"d3-loom": "^1.0.2",
"d3-selection": "^1.4.1",
"d3-selection-multi": "^1.0.1",
"d3-simple-slider": "^1.9.0",
"d3-svg-legend": "^1.13.0",
"d3_exploding_boxplot": "^0.2.1",
"file-saver": "^2.0.2",

@ -1,10 +1,9 @@
<template>
<div>
<div class="text-center">
<label id="data" for="param-dataset" data-toggle="tooltip" data-placement="right" title="Tip: use one of the data sets already provided or upload a new file.">{{ dataset }}</label>
<select id="selectFile" @change="selectDataSet()">
<option value="HeartC.csv" selected>Heart Disease</option>
<option value="IrisC.csv">Iris</option>
<option value="local">Upload New File</option>
<option value="local">Upload File</option>
</select>
<button class="btn-outline-success"
id="initializeID"
@ -37,7 +36,7 @@ export default {
defaultDataSet: 'HeartC', // default value for the first data set
searchText: 'Hyper-parameter search',
resetText: 'Reset',
dataset: 'Data set'
dataset: 'Data set:'
}
},
methods: {
@ -46,7 +45,7 @@ export default {
this.defaultDataSet = fileName.options[fileName.selectedIndex].value
this.defaultDataSet = this.defaultDataSet.split('.')[0]
if (this.defaultDataSet == "DiabetesC" || this.defaultDataSet == "HeartC" || this.defaultDataSet == "IrisC" || this.defaultDataSet == "StanceC") { // This is a function that handles a new file, which users can upload.
if (this.defaultDataSet == "HeartC") { // This is a function that handles a new file, which users can upload.
this.dataset = "Data set"
d3.select("#data").select("input").remove(); // Remove the selection field.
EventBus.$emit('SendToServerDataSetConfirmation', this.defaultDataSet)

@ -40,6 +40,8 @@ export default {
},
methods: {
reset () {
this.ScatterPlotResults = ''
this.storeEnsembleLoc = []
Plotly.purge('OverviewPlotlyCM')
},
selectVisualRepresentationCM () {

@ -0,0 +1,99 @@
<template>
<div style="margin: -20px 0 -20px 0 !important">
<div class="row align-items-center" style="margin: 0 0 -25px 0 !important">
<div class="col-lg-4"><p>Random search:</p></div>
<div class="col-lg-6"><div id="slider-stepPos"></div></div>
<div class="col-lg-2"><p id="value-stepPos"></p></div>
</div>
<div class="row align-items-center" style="margin: 0 0 -25px 0 !important">
<div class="col-lg-4"><p>Cross validation:</p></div>
<div class="col-lg-6"><div id="slider-stepNeg"></div></div>
<div class="col-lg-2"><p id="value-stepNeg"></p></div>
</div>
</div>
</template>
<script>
import { EventBus } from '../main.js'
import { sliderBottom } from 'd3-simple-slider'
import * as d3Base from 'd3'
// attach all d3 plugins to the d3 library
const d3 = Object.assign(d3Base, { sliderBottom })
export default {
name: 'GlobalParamController',
data () {
return {
}
},
methods: {
InitSliders () {
var svg = d3.select("#slider-stepPos");
svg.selectAll("*").remove();
var svg = d3.select("#slider-stepNeg");
svg.selectAll("*").remove();
var dataCorrect = [50.0, 100.0, 150.0, 200.0, 250.0, 300.0];
var dataWrong = [5.0, 10.0, 15.0];
var sliderStepPos = d3
.sliderBottom()
.min(d3.min(dataCorrect))
.max(d3.max(dataCorrect))
.width(200)
.tickFormat(d3.format(".0f"))
.ticks(6)
.step(50)
.default(100.0)
.on('onchange', val => {
d3.select('p#value-stepPos').text(d3.format(".0f")(val));
EventBus.$emit('SendtheChangeinRangePos', d3.format(".0f")(val))
});
var gStepPos = d3
.select('div#slider-stepPos')
.append('svg')
.attr('width', 500)
.attr('height', 100)
.append('g')
.attr('transform', 'translate(30,30)');
gStepPos.call(sliderStepPos);
d3.select('p#value-stepPos').text(d3.format(".0f")(sliderStepPos.value()));
var sliderStepNeg = d3
.sliderBottom()
.min(d3.min(dataWrong))
.max(d3.max(dataWrong))
.width(200)
.tickFormat(d3.format(".0f"))
.ticks(3)
.step(5)
.default(5.0)
.on('onchange', val => {
d3.select('p#value-stepNeg').text(d3.format(".0f")(val));
EventBus.$emit('SendtheChangeinRangeNeg', d3.format(".0f")(val))
});
var gStepNeg = d3
.select('div#slider-stepNeg')
.append('svg')
.attr('width', 500)
.attr('height', 100)
.append('g')
.attr('transform', 'translate(30,30)');
gStepNeg.call(sliderStepNeg);
d3.select('p#value-stepNeg').text(d3.format(".0f")(sliderStepNeg.value()));
},
},
mounted () {
this.InitSliders()
EventBus.$on('reset', this.InitSliders)
},
}
</script>

@ -39,6 +39,7 @@ export default {
},
methods: {
reset () {
this.ScatterPlotResults = ''
Plotly.purge('OverviewPlotly')
},
clean(obj) {
@ -59,6 +60,7 @@ export default {
Plotly.purge('OverviewPlotly')
var modelId = JSON.parse(this.ScatterPlotResults[0])
var colorsforScatterPlot = JSON.parse(this.ScatterPlotResults[1])
var parametersLoc = JSON.parse(this.ScatterPlotResults[2])
var parameters = JSON.parse(parametersLoc)

@ -10,6 +10,7 @@
<mdb-card-body>
<mdb-card-text class="text-left" style="font-size: 18.5px;">
<PerformanceMetrics/>
<GlobalParamController/>
<DataSetExecController/>
</mdb-card-text>
</mdb-card-body>
@ -19,7 +20,7 @@
<mdb-card>
<mdb-card-header color="primary-color" tag="h5" class="text-center">Provenance</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-left" style="font-size: 18.5px; min-height: 230px">
<mdb-card-text class="text-left" style="font-size: 18.5px; min-height: 359px">
</mdb-card-text>
</mdb-card-body>
</mdb-card>
@ -28,7 +29,7 @@
<mdb-card >
<mdb-card-header color="primary-color" tag="h5" class="text-center">Majority-Voting Ensemble's Results</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-left" style="font-size: 18.5px; min-height: 230px">
<mdb-card-text class="text-left" style="font-size: 18.5px; min-height: 359px">
</mdb-card-text>
</mdb-card-body>
</mdb-card>
@ -119,7 +120,7 @@
<b-col cols="6">
<mdb-card style="margin-top: 15px;">
<mdb-card-header color="primary-color" tag="h5" class="text-center">Hyper-Parameters' Space
[Sel: {{OverSelLength}} / All: {{OverAllLength}}]<small class="float-right"><active-scatter/></small>
[Sel: {{OverSelLength}} / All: {{OverAllLength}}]<small class="float-right"><active-scatter/></small><span class="badge badge-info badge-pill float-right">Projection<span class="badge badge-light" style="margin-left:4px; margin-bottom:1px">1</span></span>
</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-center" style="min-height: 600px">
@ -130,8 +131,8 @@
</b-col>
<b-col cols="6">
<mdb-card style="margin-top: 15px;">
<mdb-card-header color="primary-color" tag="h5" class="text-center">Models Included in the Majority-Voting Ensemble
[Sel: {{OverSelLengthCM}} / All: {{OverAllLengthCM}}]<small class="float-right"><active-scatter/></small>
<mdb-card-header color="primary-color" tag="h5" class="text-center">Majority-Voting Ensemble
[Sel: {{OverSelLengthCM}} / All: {{OverAllLengthCM}}]<small class="float-right"><active-scatter/></small><span class="badge badge-info badge-pill float-right">Projection<span class="badge badge-light" style="margin-left:4px; margin-bottom:1px">2</span></span>
</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-center" style="min-height: 600px">
@ -144,7 +145,7 @@
<b-row class="md-3">
<b-col cols="3">
<mdb-card style="margin-top: 15px;">
<mdb-card-header color="primary-color" tag="h5" class="text-center">Manipulation of Algorithms
<mdb-card-header color="primary-color" tag="h5" class="text-center">Manipulation of Algorithms<span class="badge badge-primary badge-pill float-right">Active<span class="badge badge-light" style="margin-left:4px; margin-bottom:1px">1&2</span></span>
</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-center" style="min-height: 270px">
@ -155,7 +156,7 @@
</b-col>
<b-col cols="6">
<mdb-card style="margin-top: 15px;">
<mdb-card-header color="primary-color" tag="h5" class="text-center">Predictive Results for Each Data Instance
<mdb-card-header color="primary-color" tag="h5" class="text-center">Predictive Results for Each Data Instance<span class="badge badge-primary badge-pill float-right">Active<span class="badge badge-light" style="margin-left:4px; margin-bottom:1px">{{projectionID_A}}</span></span>
</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-center" style="min-height: 270px">
@ -166,7 +167,7 @@
</b-col>
<b-col cols="3">
<mdb-card style="margin-top: 15px;">
<mdb-card-header color="primary-color" tag="h5" class="text-center">Performance for Each Validation Metric
<mdb-card-header color="primary-color" tag="h5" class="text-center">Performance for Each Validation Metric<span class="badge badge-primary badge-pill float-right">Active<span class="badge badge-light" style="margin-left:4px; margin-bottom:1px">{{projectionID_B}}</span></span>
</mdb-card-header>
<mdb-card-body>
<mdb-card-text class="text-center" style="min-height: 270px">
@ -189,6 +190,7 @@ import Algorithms from './Algorithms.vue'
import AlgorithmsController from './AlgorithmsController.vue'
import AlgorithmHyperParam from './AlgorithmHyperParam.vue'
import HyperParameterSpace from './HyperParameterSpace.vue'
import GlobalParamController from './GlobalParamController'
import Ensemble from './Ensemble.vue'
import VotingResults from './VotingResults.vue'
import Parameters from './Parameters.vue'
@ -216,6 +218,7 @@ export default Vue.extend({
AlgorithmsController,
AlgorithmHyperParam,
HyperParameterSpace,
GlobalParamController,
Ensemble,
Parameters,
Predictions,
@ -227,6 +230,8 @@ export default Vue.extend({
},
data () {
return {
projectionID_A: 1,
projectionID_B: 1,
storeEnsemble: [],
PredictSelEnsem: [],
firstTimeExec: true,
@ -281,6 +286,8 @@ export default Vue.extend({
ClassifierIDsListRemaining: [],
PredictSel: [],
storeBothEnsCM: [],
crossVal: '5',
RandomSear: '100',
}
},
methods: {
@ -673,8 +680,12 @@ export default Vue.extend({
},
fileNameSend () {
const path = `http://127.0.0.1:5000/data/ServerRequest`
const postData = {
fileName: this.RetrieveValueFile,
RandomSearch: this.RandomSear,
CrossValidation: this.crossVal,
Factors: this.basicValuesFact
}
const axiosConfig = {
headers: {
@ -773,6 +784,11 @@ export default Vue.extend({
Reset () {
const path = `http://127.0.0.1:5000/data/Reset`
this.reset = true
this.firstTimeExec = true
this.OverSelLength = 0
this.OverAllLength = 0
this.OverSelLengthCM = 0
this.OverAllLengthCM = 0
const postData = {
ClassifiersList: this.reset
}
@ -909,6 +925,14 @@ export default Vue.extend({
.catch(error => {
console.log(error)
})
},
changeActiveTo1 () {
this.projectionID_A = 1
this.projectionID_B = 1
},
changeActiveTo2 () {
this.projectionID_A = 2
this.projectionID_B = 2
}
},
created () {
@ -948,13 +972,17 @@ export default Vue.extend({
EventBus.$on('ReturningBrushedPointsIDs', data => { this.modelsUpdate = data })
//EventBus.$on('ReturningBrushedPointsIDs', this.UpdateBarChartFeatures )
EventBus.$on('ConfirmDataSet', this.fileNameSend)
EventBus.$on('reset', this.changeActiveTo1)
EventBus.$on('reset', this.Reset)
EventBus.$on('ReturningAlgorithms', data => { this.selectedAlgorithms = data })
EventBus.$on('ReturningBrushedPointsParams', data => { this.parametersofModels = data; })
EventBus.$on('RemainingPoints', this.changeActiveTo1)
EventBus.$on('RemainingPoints', data => { this.unselectedRemainingPoints = data })
EventBus.$on('InitializeCrossoverMutation', this.changeActiveTo2)
EventBus.$on('InitializeCrossoverMutation', this.sendPointsCrossMutat)
EventBus.$on('RemainingPointsCM', this.changeActiveTo2)
EventBus.$on('RemainingPointsCM', data => { this.unselectedRemainingPointsEnsem = data })
EventBus.$on('ChangeKey', data => { this.keyNow = data })
@ -1015,6 +1043,10 @@ export default Vue.extend({
EventBus.$on('toggleDeep', data => {this.toggleDeepMain = data})
EventBus.$on('SendtheChangeinRangePos', data => { this.RandomSear = data })
EventBus.$on('SendtheChangeinRangeNeg', data => { this.crossVal = data })
EventBus.$on('factorsChanged', data => { this.basicValuesFact = data })
//Prevent double click to search for a word.
document.addEventListener('mousedown', function (event) {
if (event.detail > 1) {

@ -14,7 +14,7 @@
<b-form-checkbox
id="checkboxAcc"
v-model="checkedAcc"
@click="clickAcc"
@change="clickAcc()"
>
</b-form-checkbox>
</td>
@ -23,7 +23,7 @@
<b-form-checkbox
id="checkboxGM"
v-model="checkedGM"
@click="clickGM"
@change="clickGM()"
>
</b-form-checkbox>
</td>
@ -34,7 +34,7 @@
<b-form-checkbox
id="checkboxPrec"
v-model="checkedPrec"
@click="clickPrec"
@change="clickPrec()"
>
</b-form-checkbox>
</td>
@ -43,7 +43,7 @@
<b-form-checkbox
id="checkboxRA"
v-model="checkedRA"
@click="clickRA"
@change="clickRA()"
>
</b-form-checkbox>
</td>
@ -54,7 +54,7 @@
<b-form-checkbox
id="checkboxRec"
v-model="checkedRec"
@click="clickRec"
@change="clickRec()"
>
</b-form-checkbox>
</td>
@ -63,7 +63,7 @@
<b-form-checkbox
id="checkboxLog"
v-model="checkedLog"
@click="clickLog"
@change="clickLog()"
>
</b-form-checkbox>
</td>
@ -74,7 +74,7 @@
<b-form-checkbox
id="checkboxF1"
v-model="checkedF1"
@click="clickF1"
@change="clickF1()"
>
</b-form-checkbox>
</td>
@ -83,7 +83,7 @@
<b-form-checkbox
id="checkboxMCC"
v-model="checkedMCC"
@click="clickMCC"
@change="clickMCC()"
>
</b-form-checkbox>
</td>
@ -97,6 +97,7 @@
<script>
import { EventBus } from '../main.js'
import $ from 'jquery'
export default {
name: 'PerformanceMetrics',
@ -110,36 +111,67 @@
checkedRA: false,
checkedLog: false,
checkedMCC: false,
factorsLocal: [1,1,1,1,0,0,0,0]
}
},
methods: {
clickAcc () {
this.checkedAcc = !this.checkedAcc
console.log(this.checkedAcc)
this.factorsRegisterChange(0,this.checkedAcc)
},
clickPrec () {
this.checkedPrec = !this.checkedPrec
this.factorsRegisterChange(1,this.checkedPrec)
},
clickRec () {
this.checkedRec = !this.checkedRec
this.checkedRec = !this.checkedRec
this.factorsRegisterChange(2,this.checkedRec)
},
clickF1 () {
this.checkedF1 = !this.checkedF1
this.checkedF1 = !this.checkedF1
this.factorsRegisterChange(3,this.checkedF1)
},
clickGM () {
this.checkedGM = !this.checkedGM
this.factorsRegisterChange(4,this.checkedGM)
},
clickRA () {
this.checkedRA = !this.checkedRA
this.factorsRegisterChange(5,this.checkedRA)
},
clickLog () {
this.checkedLog = !this.checkedLog
this.factorsRegisterChange(6,this.checkedLog)
},
clickMCC () {
this.checkedMCC = !this.checkedMCC
this.factorsRegisterChange(7,this.checkedMCC)
},
factorsRegisterChange (position,value) {
if (value == true) {
this.factorsLocal[position] = 1
} else {
this.factorsLocal[position] = 0
}
EventBus.$emit('factorsChanged', this.factorsLocal)
},
resetBoxes () {
this.checkedAcc = true
this.checkedPrec = true
this.checkedRec = true
this.checkedF1 = true
this.checkedGM = false
this.checkedRA = false
this.checkedLog = false
this.checkedMCC = false
this.factorsLocal = [1,1,1,1,0,0,0,0]
EventBus.$emit('factorsChanged', this.factorsLocal)
}
},
mounted () {
EventBus.$on('reset', this.resetBoxes)
}
}
</script>

@ -32,6 +32,10 @@ export default {
svg.selectAll("*").remove();
var svg = d3.select("#containerSelection");
svg.selectAll("*").remove();
this.GetResultsAll = []
this.GetResultsSelection = []
this.predictSelection = []
this.StoreIndices = []
},
Grid () {
@ -60,13 +64,22 @@ export default {
var predictions = JSON.parse(this.GetResultsAll[12])
var KNNPred = predictions[0]
var LRPred = predictions[1]
var PredAver = predictions[2]
var MLPPred = predictions[2]
var RFPred = predictions[3]
var GradBPred = predictions[4]
var PredAver = predictions[5]
var dataAver = []
var dataAverGetResults = []
var dataKNN = []
var dataKNNResults = []
var dataLR = []
var dataLRResults = []
var dataMLP = []
var dataMLPResults = []
var dataRF = []
var dataRFResults = []
var dataGradB = []
var dataGradBResults = []
var max = 0
for (let i = 0; i < targetNames.length; i++) {
@ -79,26 +92,41 @@ export default {
var size = sqrtSize * sqrtSize
for (let i = 0; i < targetNames.length; i++) {
dataAver = [];
dataAver = []
dataKNN = []
dataLR = []
dataMLP = []
dataRF = []
dataGradB = []
getIndices[targetNames[i]].forEach(element => {
dataAver.push({ id: element, value: PredAver[element][targetNames[i]] })
dataKNN.push({ id: element, value: KNNPred[element][targetNames[i]] })
dataLR.push({ id: element, value: LRPred[element][targetNames[i]] })
dataMLP.push({ id: element, value: MLPPred[element][targetNames[i]] })
dataRF.push({ id: element, value: RFPred[element][targetNames[i]] })
dataGradB.push({ id: element, value: GradBPred[element][targetNames[i]] })
});
for (let j = 0; j < size - getIndices[targetNames[i]].length; j++) {
dataAver.push({ id: null, value: 1.0 })
dataKNN.push({ id: null, value: 1.0 })
dataLR.push({ id: null, value: 1.0 })
dataMLP.push({ id: null, value: 1.0 })
dataRF.push({ id: null, value: 1.0 })
dataGradB.push({ id: null, value: 1.0 })
}
dataAverGetResults.push(dataAver)
dataKNNResults.push(dataKNN)
dataLRResults.push(dataLR)
dataMLPResults.push(dataMLP)
dataRFResults.push(dataRF)
dataGradBResults.push(dataGradB)
}
dataAverGetResults.reverse()
dataKNNResults.reverse()
dataLRResults.reverse()
dataMLPResults.reverse()
dataRFResults.reverse()
dataGradBResults.reverse()
var classArray = []
this.StoreIndices = []
@ -119,15 +147,27 @@ export default {
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
classArray.push(dataAverGetResults[i].concat(dataKNNResults[i], dataLRResults[i]));
dataMLPResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
dataRFResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
dataGradBResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
classArray.push(dataAverGetResults[i].concat(dataKNNResults[i], dataLRResults[i],dataMLPResults[i],dataRFResults[i],dataGradBResults[i]));
}
var classStore = [].concat.apply([], classArray);
// === Set up canvas === //
var width = 1200,
height = 125;
height = 85;
var colourScale;
@ -144,9 +184,9 @@ export default {
var custom = d3.select(customBase); // this is our svg replacement
// settings for a grid with 40 cells in a row and 2x5 cells in a group
var groupSpacing = 60;
var groupSpacing = 40;
var cellSpacing = 2;
var cellSize = Math.floor((width - 1 * groupSpacing) / (10 * sqrtSize)) - cellSpacing;
var cellSize = Math.floor((width - 1 * groupSpacing) / (13 * sqrtSize)) - cellSpacing;
// === First call === //
databind(classStore, size, sqrtSize); // ...then update the databind function
@ -187,6 +227,13 @@ export default {
.attr('width', cellSize)
.attr('height', cellSize)
.attr('fillStyle', function(d) { return colourScale(d.value); })
.attr('fill-opacity', function(d) {
if (d.id == null) {
return "0.0";
} else {
return "1.0";
}
});
var exitSel = join.exit()
.transition()
@ -240,21 +287,29 @@ export default {
svg.selectAll("*").remove();
var predictionsAll = JSON.parse(this.GetResultsAll[12])
console.log(predictionsAll)
if (this.predictSelection.length != 0) {
var predictions = this.predictSelection
var KNNPred = predictions[0]
var LRPred = predictions[1]
var PredAver = predictions[2]
var MLPPred = predictions[2]
var RFPred = predictions[3]
var GradBPred = predictions[4]
var PredAver = predictions[5]
} else {
var KNNPred = predictionsAll[0]
var LRPred = predictionsAll[1]
var PredAver = predictionsAll[2]
var MLPPred = predictionsAll[2]
var RFPred = predictionsAll[3]
var GradBPred = predictionsAll[4]
var PredAver = predictionsAll[5]
}
var KNNPredAll = predictionsAll[0]
var LRPredAll = predictionsAll[1]
var PredAverAll = predictionsAll[2]
var MLPPredAll = predictionsAll[2]
var RFPredAll = predictionsAll[3]
var GradBPredAll = predictionsAll[4]
var PredAverAll = predictionsAll[5]
var yValues = JSON.parse(this.GetResultsSelection[6])
var targetNames = JSON.parse(this.GetResultsSelection[7])
@ -271,6 +326,12 @@ export default {
var dataKNNResults = []
var dataLR = []
var dataLRResults = []
var dataMLP = []
var dataMLPResults = []
var dataRF = []
var dataRFResults = []
var dataGradB = []
var dataGradBResults = []
var max = 0
for (let i = 0; i < targetNames.length; i++) {
@ -283,27 +344,42 @@ export default {
var size = sqrtSize * sqrtSize
for (let i = 0; i < targetNames.length; i++) {
dataAver = [];
dataAver = []
dataKNN = []
dataLR = []
dataMLP = []
dataRF = []
dataGradB = []
getIndices[targetNames[i]].forEach(element => {
dataAver.push({ id: element, value: PredAver[element][targetNames[i]] - PredAverAll[element][targetNames[i]] })
dataKNN.push({ id: element, value: KNNPred[element][targetNames[i]] - KNNPredAll[element][targetNames[i]] })
dataLR.push({ id: element, value: LRPred[element][targetNames[i]] - LRPredAll[element][targetNames[i]] })
dataMLP.push({ id: element, value: MLPPred[element][targetNames[i]] - MLPPredAll[element][targetNames[i]] })
dataRF.push({ id: element, value: RFPred[element][targetNames[i]] - RFPredAll[element][targetNames[i]] })
dataGradB.push({ id: element, value: GradBPred[element][targetNames[i]] - GradBPredAll[element][targetNames[i]] })
});
for (let j = 0; j < size - getIndices[targetNames[i]].length; j++) {
dataAver.push({ id: null, value: 0 })
dataKNN.push({ id: null, value: 0 })
dataLR.push({ id: null, value: 0 })
dataMLP.push({ id: null, value: 0 })
dataRF.push({ id: null, value: 0 })
dataGradB.push({ id: null, value: 0 })
}
dataAverGetResults.push(dataAver)
dataKNNResults.push(dataKNN)
dataLRResults.push(dataLR)
dataMLPResults.push(dataMLP)
dataRFResults.push(dataRF)
dataGradBResults.push(dataGradB)
}
dataAverGetResults.reverse()
dataKNNResults.reverse()
dataLRResults.reverse()
dataMLPResults.reverse()
dataRFResults.reverse()
dataGradBResults.reverse()
var classArray = []
for (let i = 0; i < dataAverGetResults.length; i++) {
@ -321,14 +397,27 @@ export default {
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
classArray.push(dataAverGetResults[i].concat(dataKNNResults[i], dataLRResults[i]));
dataMLPResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
dataRFResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
dataGradBResults[i].sort(function(a, b){
return indices.indexOf(a.id) - indices.indexOf(b.id)
});
classArray.push(dataAverGetResults[i].concat(dataKNNResults[i], dataLRResults[i], dataMLPResults[i], dataRFResults[i], dataGradBResults[i]));
}
var classStore = [].concat.apply([], classArray);
// === Set up canvas === //
var width = 1200,
height = 125;
height = 85;
var colourScale;
@ -345,9 +434,9 @@ export default {
var custom = d3.select(customBase); // this is our svg replacement
// settings for a grid with 40 cells in a row and 2x5 cells in a group
var groupSpacing = 60;
var groupSpacing = 40;
var cellSpacing = 2;
var cellSize = Math.floor((width - 1 * groupSpacing) / (10 * sqrtSize)) - cellSpacing;
var cellSize = Math.floor((width - 1 * groupSpacing) / (13 * sqrtSize)) - cellSpacing;
// === First call === //
databind(classStore, size, sqrtSize); // ...then update the databind function
@ -388,6 +477,13 @@ export default {
.attr('width', cellSize)
.attr('height', cellSize)
.attr('fillStyle', function(d) { return colourScale(d.value); })
.attr('fill-opacity', function(d) {
if (d.id == null) {
return "0.0";
} else {
return "1.0";
}
});
var exitSel = join.exit()
.transition()

  297  run.py

@ -42,6 +42,12 @@ cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global Results
Results = []
global ResultsCM
ResultsCM = []
global DataRawLength
global DataResultsRaw
global previousState
@ -68,11 +74,20 @@ def reset():
global factors
factors = [1,1,1,1,0,0,0,0]
global crossValidation
crossValidation = 5
global randomSearchVar
randomSearchVar = 100
global keyData
keyData = 0
KNNModelsCount = 0
LRModelsCount = 100
LRModelsCount = KNNModelsCount+randomSearchVar
MLPModelsCount = LRModelsCount+randomSearchVar
RFModelsCount = MLPModelsCount+randomSearchVar
GradBModelsCount = RFModelsCount+randomSearchVar
global XData
XData = []
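For orientation, a small sketch of the ID-offset scheme introduced in this hunk: each algorithm now gets a block of model IDs whose size is the random-search budget, instead of the previously hard-coded 100. The numbers in the comments assume the default `randomSearchVar = 100` from the hunk above.

```python
# Per-algorithm ID blocks derived from the random-search budget, as in reset().
randomSearchVar = 100                                  # models per algorithm
KNNModelsCount   = 0
LRModelsCount    = KNNModelsCount + randomSearchVar    # 100
MLPModelsCount   = LRModelsCount + randomSearchVar     # 200
RFModelsCount    = MLPModelsCount + randomSearchVar    # 300
GradBModelsCount = RFModelsCount + randomSearchVar     # 400

# These offsets match the AlgorithmsIDsEnd values (0, 100, 200, 300, 400)
# recorded in the cached metadata.json files earlier in this commit.
```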
@ -86,7 +101,16 @@ def reset():
addKNN = 0
global addLR
addLR = 100
addLR = addKNN+randomSearchVar
global addMLP
addMLP = addLR+randomSearchVar
global addRF
addRF = addMLP+randomSearchVar
global addGradB
addGradB = addRF+randomSearchVar
global countAllModels
countAllModels = 0
@ -124,9 +148,6 @@ def reset():
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
@ -166,6 +187,7 @@ def retrieveFileName():
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
data = json.loads(fileName)
global keySpecInternal
keySpecInternal = 1
@ -182,6 +204,27 @@ def retrieveFileName():
global keyData
keyData = 0
global KNNModelsCount
global LRModelsCount
global MLPModelsCount
global RFModelsCount
global GradBModelsCount
global factors
factors = data['Factors']
global crossValidation
crossValidation = int(data['CrossValidation'])
global randomSearchVar
randomSearchVar = int(data['RandomSearch'])
KNNModelsCount = 0
LRModelsCount = KNNModelsCount+randomSearchVar
MLPModelsCount = LRModelsCount+randomSearchVar
RFModelsCount = MLPModelsCount+randomSearchVar
GradBModelsCount = RFModelsCount+randomSearchVar
global XData
XData = []
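The fields read here mirror the `postData` object that the updated `fileNameSend()` in Main.vue sends. A hedged illustration of that request, assuming the Python `requests` package and example values; field names come from the Main.vue and run.py hunks in this commit.

```python
# Illustrative request against the /data/ServerRequest endpoint.
import requests

payload = {
    "fileName": "HeartC",                   # data set key (Heart Disease)
    "RandomSearch": "100",                  # slider value -> randomSearchVar
    "CrossValidation": "5",                 # slider value -> crossValidation folds
    "Factors": [1, 1, 1, 1, 0, 0, 0, 0],    # which validation metrics count
}
requests.post("http://127.0.0.1:5000/data/ServerRequest", json=payload)
```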
@ -219,7 +262,16 @@ def retrieveFileName():
addKNN = 0
global addLR
addLR = 100
addLR = addKNN+randomSearchVar
global addMLP
addMLP = addLR+randomSearchVar
global addRF
addRF = addMLP+randomSearchVar
global addGradB
addGradB = addRF+randomSearchVar
# Initializing models
@ -241,9 +293,6 @@ def retrieveFileName():
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
global scoring
scoring = {'accuracy': 'accuracy', 'precision_weighted': 'precision_weighted', 'recall_weighted': 'recall_weighted', 'f1_weighted': 'f1_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
@ -297,7 +346,7 @@ def retrieveFileName():
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
@ -514,26 +563,27 @@ def retrieveModel():
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(1,100,1)), 'max_iter': list(np.arange(50,500,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
countAllModels = countAllModels + 100
countAllModels = countAllModels + randomSearchVar
AlgorithmsIDsEnd = countAllModels
elif (eachAlgor) == 'MLP':
start = 60
stop = 120
step = 1
random.seed(RANDOM_SEED)
ranges = [(n, random.randint(1,3)) for n in range(start, stop, step)]
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'hidden_layer_sizes': ranges,'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
countAllModels = countAllModels + 100
countAllModels = countAllModels + randomSearchVar
AlgorithmsIDsEnd = countAllModels
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(20, 100)), 'criterion': ['gini', 'entropy']}
countAllModels = countAllModels + 100
countAllModels = countAllModels + randomSearchVar
AlgorithmsIDsEnd = countAllModels
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(20, 100)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
countAllModels = countAllModels + 100
countAllModels = countAllModels + randomSearchVar
AlgorithmsIDsEnd = countAllModels
allParametersPerformancePerModel = randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd)
HistoryPreservation = allParametersPerformancePerModel.copy()
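One detail worth noting in this hunk: the MLP search space pairs every hidden-layer width in [60, 120) with a seeded random depth of 1 to 3. A stand-alone version of that generator:

```python
# Stand-alone version of the hidden_layer_sizes generator in the hunk above;
# each entry is a (layer width, random depth 1-3) tuple, e.g. (60, 2).
import random

RANDOM_SEED = 42
start, stop, step = 60, 120, 1
random.seed(RANDOM_SEED)
ranges = [(n, random.randint(1, 3)) for n in range(start, stop, step)]
print(len(ranges), ranges[:3])   # 60 candidate architectures
```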
@ -547,8 +597,6 @@ memory = Memory(location, verbose=0)
@memory.cache
def randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
print('inside')
search = RandomizedSearchCV(
estimator=clf, param_distributions=params, n_iter=100,
cv=crossValidation, refit='accuracy', scoring=scoring,
@ -640,30 +688,45 @@ def randomSearch(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
def PreprocessingIDs():
dicKNN = allParametersPerformancePerModel[0]
dicLR = allParametersPerformancePerModel[4]
dicMLP = allParametersPerformancePerModel[8]
dicRF = allParametersPerformancePerModel[12]
dicGradB = allParametersPerformancePerModel[16]
df_concatIDs = dicKNN + dicLR
df_concatIDs = dicKNN + dicLR + dicMLP + dicRF + dicGradB
return df_concatIDs
def PreprocessingMetrics():
dicKNN = allParametersPerformancePerModel[2]
dicLR = allParametersPerformancePerModel[6]
dicMLP = allParametersPerformancePerModel[10]
dicRF = allParametersPerformancePerModel[14]
dicGradB = allParametersPerformancePerModel[18]
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
df_concatMetrics = pd.concat([dfKNN, dfLR])
df_concatMetrics = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
df_concatMetrics = df_concatMetrics.reset_index(drop=True)
return df_concatMetrics
def PreprocessingPred():
dicKNN = allParametersPerformancePerModel[3]
dicLR = allParametersPerformancePerModel[7]
dicMLP = allParametersPerformancePerModel[11]
dicRF = allParametersPerformancePerModel[15]
dicGradB = allParametersPerformancePerModel[19]
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
df_concatProbs = pd.concat([dfKNN, dfLR])
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
df_concatProbs.reset_index(drop=True, inplace=True)
predictionsKNN = []
@ -675,13 +738,28 @@ def PreprocessingPred():
for column, content in dfLR.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictionsMLP = []
for column, content in dfMLP.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsMLP.append(el)
predictionsRF = []
for column, content in dfRF.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsRF.append(el)
predictionsGradB = []
for column, content in dfGradB.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsGradB.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return [predictionsKNN, predictionsLR, predictions]
return [predictionsKNN, predictionsLR, predictionsMLP, predictionsRF, predictionsGradB, predictions]
def PreprocessingPredEnsemble():
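The index arithmetic in these helpers implies a flat layout for `allParametersPerformancePerModel`: four entries per algorithm (IDs, parameters, metrics, predictions) in the order KNN, LR, MLP, RF, GradB. A small sketch of that inferred convention, an assumption read off the indices above rather than a documented API:

```python
# Inferred layout of allParametersPerformancePerModel: 4 entries per algorithm
# in the order KNN, LR, MLP, RF, GradB, matching the indices used above
# (e.g. IDs at 0/4/8/12/16, metrics at 2/6/10/14/18).
ALGORITHMS = ['KNN', 'LR', 'MLP', 'RF', 'GradB']

def entries_for(algorithm):
    """Return the (ids, params, metrics, predictions) indices for one algorithm."""
    base = 4 * ALGORITHMS.index(algorithm)
    return base, base + 1, base + 2, base + 3

print(entries_for('MLP'))   # -> (8, 9, 10, 11)
```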
@ -689,29 +767,49 @@ def PreprocessingPredEnsemble():
numberIDKNN = []
numberIDLR = []
numberIDMLP = []
numberIDRF = []
numberIDGradB = []
for el in EnsembleActive:
match = re.match(r"([a-z]+)([0-9]+)", el, re.I)
if match:
items = match.groups()
if (items[0] == 'KNN'):
numberIDKNN.append(int(items[1]))
else:
elif (items[0] == 'LR'):
numberIDLR.append(int(items[1]))
elif (items[0] == 'MLP'):
numberIDMLP.append(int(items[1]))
elif (items[0] == 'RF'):
numberIDRF.append(int(items[1]))
else:
numberIDGradB.append(int(items[1]))
dicKNN = allParametersPerformancePerModel[3]
dicLR = allParametersPerformancePerModel[7]
dicMLP = allParametersPerformancePerModel[11]
dicRF = allParametersPerformancePerModel[15]
dicGradB = allParametersPerformancePerModel[19]
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
df_concatProbs = pd.concat([dfKNN, dfLR])
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
df_concatProbs = df_concatProbs.reset_index(drop=True)
dfKNN = df_concatProbs.loc[numberIDKNN]
dfLR = df_concatProbs.loc[numberIDLR]
dfMLP = df_concatProbs.loc[numberIDMLP]
dfRF = df_concatProbs.loc[numberIDRF]
dfGradB = df_concatProbs.loc[numberIDGradB]
df_concatProbs = pd.DataFrame()
df_concatProbs = df_concatProbs.iloc[0:0]
df_concatProbs = pd.concat([dfKNN, dfLR])
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
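# Note (editorial): the block above concatenates the probability tables of all five algorithms,
# resets the index so model IDs become global row numbers, keeps only the rows whose IDs were
# parsed out of EnsembleActive, and then re-concatenates those selected rows before averaging.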
predictionsKNN = []
for column, content in dfKNN.items():
@@ -723,50 +821,95 @@ def PreprocessingPredEnsemble():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictionsMLP = []
for column, content in dfMLP.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsMLP.append(el)
predictionsRF = []
for column, content in dfRF.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsRF.append(el)
predictionsGradB = []
for column, content in dfGradB.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsGradB.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return [predictionsKNN, predictionsLR, predictions]
return [predictionsKNN, predictionsLR, predictionsMLP, predictionsRF, predictionsGradB, predictions]
def PreprocessingParam():
dicKNN = allParametersPerformancePerModel[1]
dicLR = allParametersPerformancePerModel[5]
dicMLP = allParametersPerformancePerModel[9]
dicRF = allParametersPerformancePerModel[13]
dicGradB = allParametersPerformancePerModel[17]
dicKNN = dicKNN['params']
dicLR = dicLR['params']
dicMLP = dicMLP['params']
dicRF = dicRF['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfLR = dfLR.T
dfMLP = dfMLP.T
dfRF = dfRF.T
dfGradB = dfGradB.T
df_params = pd.concat([dfKNN, dfLR])
df_params = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
df_params = df_params.reset_index(drop=True)
return df_params
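# Note (editorial): the 'params' dictionaries appear to be keyed by model number, so they are
# re-keyed to int, turned into DataFrames, and transposed so that each row holds one
# hyperparameter configuration before the five per-algorithm tables are stacked.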
def PreprocessingParamSep():
dicKNN = allParametersPerformancePerModel[1]
dicLR = allParametersPerformancePerModel[5]
dicMLP = allParametersPerformancePerModel[9]
dicRF = allParametersPerformancePerModel[13]
dicGradB = allParametersPerformancePerModel[17]
dicKNN = dicKNN['params']
dicLR = dicLR['params']
dicMLP = dicMLP['params']
dicRF = dicRF['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfLR = dfLR.T
dfMLP = dfMLP.T
dfRF = dfRF.T
dfGradB = dfGradB.T
return [dfKNN, dfLR]
return [dfKNN, dfLR, dfMLP, dfRF, dfGradB]
# remove that maybe!
def preProcsumPerMetric(factors):
@@ -1463,28 +1606,61 @@ def PreprocessingPredSel(SelectedIDs):
global addKNN
global addLR
global addMLP
global addRF
global addGradB
numberIDKNN = []
numberIDLR = []
numberIDMLP = []
numberIDRF = []
numberIDGradB = []
for el in SelectedIDs:
match = re.match(r"([a-z]+)([0-9]+)", el, re.I)
if match:
items = match.groups()
if (items[0] == 'KNN'):
numberIDKNN.append(int(items[1]) -addKNN)
else:
numberIDKNN.append(int(items[1]) - addKNN)
elif (items[0] == 'LR'):
numberIDLR.append(int(items[1]) - addLR)
elif (items[0] == 'MLP'):
numberIDMLP.append(int(items[1]) - addMLP)
elif (items[0] == 'RF'):
numberIDRF.append(int(items[1]) - addRF)
else:
numberIDGradB.append(int(items[1]) - addGradB)
dicKNN = allParametersPerformancePerModel[3]
dicLR = allParametersPerformancePerModel[7]
dicMLP = allParametersPerformancePerModel[11]
dicRF = allParametersPerformancePerModel[15]
dicGradB = allParametersPerformancePerModel[19]
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfKNN = dfKNN.loc[numberIDKNN]
dfLR = pd.DataFrame.from_dict(dicLR)
dfLR = dfLR.loc[numberIDLR]
dfLR.index += addKNN
df_concatProbs = pd.concat([dfKNN, dfLR])
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfMLP = dfMLP.loc[numberIDMLP]
dfMLP.index += addKNN + addLR
dfRF = pd.DataFrame.from_dict(dicRF)
dfRF = dfRF.loc[numberIDRF]
dfRF.index += addKNN + addLR + addMLP
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfGradB = dfGradB.loc[numberIDGradB]
dfGradB.index += addKNN + addLR + addMLP + addRF
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
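# Note (editorial): addKNN/addLR/addMLP/addRF are presumably the sizes (or ID offsets) of each
# algorithm block; shifting the per-algorithm frames by the cumulative offsets keeps their row
# indices from colliding when the selected rows are concatenated into one table.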
predictionsKNN = []
for column, content in dfKNN.items():
@@ -1496,12 +1672,27 @@ def PreprocessingPredSel(SelectedIDs):
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictionsMLP = []
for column, content in dfMLP.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsMLP.append(el)
predictionsRF = []
for column, content in dfRF.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsRF.append(el)
predictionsGradB = []
for column, content in dfGradB.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsGradB.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return [predictionsKNN, predictionsLR, predictions]
return [predictionsKNN, predictionsLR, predictionsMLP, predictionsRF, predictionsGradB, predictions]
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverSelIDs', methods=["GET", "POST"])
@@ -1528,30 +1719,49 @@ def PreprocessingPredSelEnsem(SelectedIDsEnsem):
numberIDKNN = []
numberIDLR = []
numberIDMLP = []
numberIDRF = []
numberIDGradB = []
for el in SelectedIDsEnsem:
match = re.match(r"([a-z]+)([0-9]+)", el, re.I)
if match:
items = match.groups()
if (items[0] == 'KNN'):
numberIDKNN.append(int(items[1]))
elif (items[0] == 'LR'):
numberIDLR.append(int(items[1]))
elif (items[0] == 'MLP'):
numberIDMLP.append(int(items[1]))
elif (items[0] == 'RF'):
numberIDRF.append(int(items[1]))
else:
numberIDGradB.append(int(items[1]))
dicKNN = allParametersPerformancePerModel[3]
dicLR = allParametersPerformancePerModel[7]
dicMLP = allParametersPerformancePerModel[11]
dicRF = allParametersPerformancePerModel[15]
dicGradB = allParametersPerformancePerModel[19]
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfLR = pd.DataFrame.from_dict(dicLR)
df_concatProbs = pd.concat([dfKNN, dfLR])
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfRF = pd.DataFrame.from_dict(dicRF)
dfGradB = pd.DataFrame.from_dict(dicGradB)
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
df_concatProbs = df_concatProbs.reset_index(drop=True)
dfKNN = df_concatProbs.loc[numberIDKNN]
dfLR = df_concatProbs.loc[numberIDLR]
dfMLP = df_concatProbs.loc[numberIDMLP]
dfRF = df_concatProbs.loc[numberIDRF]
dfGradB = df_concatProbs.loc[numberIDGradB]
df_concatProbs = pd.DataFrame()
df_concatProbs = df_concatProbs.iloc[0:0]
df_concatProbs = pd.concat([dfKNN, dfLR])
df_concatProbs = pd.concat([dfKNN, dfLR, dfMLP, dfRF, dfGradB])
predictionsKNN = []
for column, content in dfKNN.items():
@@ -1563,12 +1773,27 @@ def PreprocessingPredSelEnsem(SelectedIDsEnsem):
el = [sum(x)/len(x) for x in zip(*content)]
predictionsLR.append(el)
predictionsMLP = []
for column, content in dfMLP.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsMLP.append(el)
predictionsRF = []
for column, content in dfRF.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsRF.append(el)
predictionsGradB = []
for column, content in dfGradB.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsGradB.append(el)
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return [predictionsKNN, predictionsLR, predictions]
return [predictionsKNN, predictionsLR, predictionsMLP, predictionsRF, predictionsGradB, predictions]
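# Note (editorial): PreprocessingPredSelEnsem mirrors PreprocessingPredSel but selects rows from
# the globally re-indexed probability table; both now return the five per-algorithm prediction
# lists followed by the combined list, in the same six-element order.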
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverSelIDsEnsem', methods=["GET", "POST"])
