master
parent 4887518984
commit aacd73ba73
  1. BIN  __pycache__/run.cpython-37.pyc
  2. 1    cachedir/joblib/run/GridSearchForModels/01391d5b7d809b9da8216d82630ec74d/metadata.json
  3. 1    cachedir/joblib/run/GridSearchForModels/1289ed21cfae05e620dbdf09a1ffb437/metadata.json
  4. BIN  cachedir/joblib/run/GridSearchForModels/1e455e769a62ea511c2cdc253692f9f3/output.pkl
  5. 1    cachedir/joblib/run/GridSearchForModels/231801fa9c8325943fbbb9638021cf43/metadata.json
  6. 1    cachedir/joblib/run/GridSearchForModels/27fb24df3dff8a334de9acfb20665e0e/metadata.json
  7. BIN  cachedir/joblib/run/GridSearchForModels/2c4edd3fe62fd40568ca7f7561419075/output.pkl
  8. BIN  cachedir/joblib/run/GridSearchForModels/3ce92a7bcd65ab00b3d79210d1161d9f/output.pkl
  9. 1    cachedir/joblib/run/GridSearchForModels/3fe88bc8502752b8df3f40320d27a183/metadata.json
 10. 1    cachedir/joblib/run/GridSearchForModels/44564a790ced36689a726d58cdcc81eb/metadata.json
 11. BIN  cachedir/joblib/run/GridSearchForModels/49fd4ec2cb297701d1ed495edf356415/output.pkl
 12. BIN  cachedir/joblib/run/GridSearchForModels/4dc60b5d3e7ea84e397d51513ba22b4b/output.pkl
 13. 1    cachedir/joblib/run/GridSearchForModels/60b98008196c95d4a5009597dd562ea5/metadata.json
 14. BIN  cachedir/joblib/run/GridSearchForModels/66ed1ea70506cc8b5ca5534fe678bca1/output.pkl
 15. 1    cachedir/joblib/run/GridSearchForModels/7542a9fb28e131960e44852d33049c20/metadata.json
 16. 1    cachedir/joblib/run/GridSearchForModels/76519e903e81fb48c4d44abbc75bbc3c/metadata.json
 17. 1    cachedir/joblib/run/GridSearchForModels/7aa4b0bfdf5ad1ec09afbdf111d870b6/metadata.json
 18. 1    cachedir/joblib/run/GridSearchForModels/98bb8d718b1c0023eaafddc6746211e8/metadata.json
 19. 1    cachedir/joblib/run/GridSearchForModels/9bdfbbb781609486c8697f91d6960c0f/metadata.json
 20. BIN  cachedir/joblib/run/GridSearchForModels/9f82504e481e73b7dfc700f2b013ab1e/output.pkl
 21. 1    cachedir/joblib/run/GridSearchForModels/a69ae6b229cf595a26fa8709787ce9e7/metadata.json
 22. BIN  cachedir/joblib/run/GridSearchForModels/aaf8008b46bc250a1ce9f54973d7ef9a/output.pkl
 23. 1    cachedir/joblib/run/GridSearchForModels/ab217966ac94ecfcb1f16130f136aadd/metadata.json
 24. BIN  cachedir/joblib/run/GridSearchForModels/ac4f6dc4ccc2d978f1c3dd9edd67372a/output.pkl
 25. 1    cachedir/joblib/run/GridSearchForModels/ad0a720d75812a4bedd57fc40900baac/metadata.json
 26. BIN  cachedir/joblib/run/GridSearchForModels/bbe4c1e011dd495e308dfa7307fddae5/output.pkl
 27. BIN  cachedir/joblib/run/GridSearchForModels/c50ec33a65037c5e16dedd3fb0a5c417/output.pkl
 28. BIN  cachedir/joblib/run/GridSearchForModels/d412c05bdd6f3503d5f51b82cde55226/output.pkl
 29. BIN  cachedir/joblib/run/GridSearchForModels/e1a97a94b8b2ed7d114fbfa3f6a30b91/output.pkl
 30. BIN  cachedir/joblib/run/GridSearchForModels/ee3fd6e8cf51a42bf549fd674a22ea3c/output.pkl
 31. 13   cachedir/joblib/run/GridSearchForModels/func_code.py
 32. 25   frontend/src/components/AlgorithmHyperParam.vue
 33. 26   frontend/src/components/Algorithms.vue
 34. 2    frontend/src/components/BarChart.vue
 35. 2    frontend/src/components/DataSpace.vue
 36. 2    frontend/src/components/FinalResultsLinePlot.vue
 37. 2    frontend/src/components/Main.vue
 38. 2    frontend/src/components/Parameters.vue
 39. 2    frontend/src/components/PredictionsSpace.vue
 40. 2    frontend/src/components/Provenance.vue
 41. 1    frontend/src/components/ScatterPlot.vue
 42. 113  run.py

Binary file not shown.

@@ -0,0 +1 @@
{"duration": 225.68257093429565, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LogisticRegression(C=1.925, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=200,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=None, solver='saga', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [0.5, 0.575, 0.6499999999999999, 0.7249999999999999, 0.7999999999999998, 0.8749999999999998, 0.9499999999999997, 1.0249999999999997, 1.0999999999999996, 1.1749999999999996, 1.2499999999999996, 1.3249999999999995, 1.3999999999999995, 1.4749999999999994, 1.5499999999999994, 1.6249999999999993, 1.6999999999999993, 1.7749999999999992, 1.8499999999999992, 1.9249999999999992], 'max_iter': [50, 100, 150, 200], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "1816"}}

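These metadata.json entries come from joblib's Memory cache: each hash-named directory corresponds to one argument tuple passed to GridSearchForModels, with metadata.json recording the call duration and stringified inputs, and output.pkl holding the pickled return value. A minimal sketch of the setup that produces this layout (the cachedir name matches the paths above; the rest is illustrative):

from joblib import Memory

# results land under cachedir/joblib/<module>/<function>/<argument hash>/
memory = Memory('cachedir', verbose=0)

@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
    ...  # body elided; the return value is pickled to output.pkl on the first call

joblib also stores the decorated function's source in func_code.py; when the stored source no longer matches (as in the "# first line: 466" → "# first line: 510" hunk further down), cached results for that function are recomputed. The deleted entries in this commit (the "@@ -1 +0,0 @@" hunks, whose XData is the 768-row diabetes table) are superseded by new entries for the 150-row Iris data.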
@@ -0,0 +1 @@
{"duration": 23.182815074920654, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "QuadraticDiscriminantAnalysis(priors=None, reg_param=49, store_covariance=False,\n tol=0.00051)", "params": "{'reg_param': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49], 'tol': [1e-05, 0.00051]}", "eachAlgor": "'QDA'", "AlgorithmsIDsEnd": "2716"}}

@@ -0,0 +1 @@
{"duration": 608.9179818630219, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'ExtraT'", "AlgorithmsIDsEnd": "3036"}}

@@ -0,0 +1 @@
{"duration": 829.405121088028, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "GradientBoostingClassifier(ccp_alpha=0.0, criterion='mae', init=None,\n learning_rate=0.23, loss='deviance', max_depth=3,\n max_features=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=109,\n n_iter_no_change=None, presort='deprecated',\n random_state=None, subsample=1.0, tol=0.0001,\n validation_fraction=0.1, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109], 'learning_rate': [0.01, 0.12, 0.23], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GradB'", "AlgorithmsIDsEnd": "3356"}}

@@ -0,0 +1 @@
{"duration": 112.00387978553772, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "MLPClassifier(activation='tanh', alpha=0.00081, batch_size='auto', beta_1=0.9,\n beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant',\n learning_rate_init=0.001, max_fun=15000, max_iter=100,\n momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,\n power_t=0.5, random_state=None, shuffle=True, solver='sgd',\n tol=0.00051, validation_fraction=0.1, verbose=False,\n warm_start=False)", "params": "{'alpha': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001], 'tol': [1e-05, 0.00051], 'max_iter': [100], 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver': ['adam', 'sgd']}", "eachAlgor": "'MLP'", "AlgorithmsIDsEnd": "1736"}}

@@ -0,0 +1 @@
{"duration": 457.51465487480164, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "AdaBoostClassifier(algorithm='SAMME', base_estimator=None, learning_rate=1.2,\n n_estimators=79, random_state=None)", "params": "{'n_estimators': [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79], 'learning_rate': [0.1, 1.2000000000000002], 'algorithm': ['SAMME.R', 'SAMME']}", "eachAlgor": "'AdaB'", "AlgorithmsIDsEnd": "3196"}}

@@ -0,0 +1 @@
{"duration": 476.14403200149536, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "SVC(C=4.39, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='scale', kernel='sigmoid',\n max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001,\n verbose=False)", "params": "{'C': [0.1, 0.21000000000000002, 0.32000000000000006, 0.43000000000000005, 0.54, 0.65, 0.7600000000000001, 0.8700000000000001, 0.9800000000000001, 1.09, 1.2000000000000002, 1.3100000000000003, 1.4200000000000004, 1.5300000000000002, 1.6400000000000003, 1.7500000000000002, 1.8600000000000003, 1.9700000000000004, 2.08, 2.1900000000000004, 2.3000000000000003, 2.4100000000000006, 2.5200000000000005, 2.6300000000000003, 2.7400000000000007, 2.8500000000000005, 2.9600000000000004, 3.0700000000000003, 3.1800000000000006, 3.2900000000000005, 3.4000000000000004, 3.5100000000000007, 3.6200000000000006, 3.7300000000000004, 3.8400000000000007, 3.9500000000000006, 4.0600000000000005, 4.17, 4.28, 4.390000000000001], 'kernel': ['rbf', 'linear', 'poly', 'sigmoid']}", "eachAlgor": "'SVC'", "AlgorithmsIDsEnd": "576"}}

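A side note on the grids recorded above: values such as 0.21000000000000002 and 0.6499999999999999 are not typos; they are floating-point drift from building the grids with np.arange and a fractional step, exactly as run.py does further down. A hedged sketch of the effect (np.round is an alternative, not something this commit applies):

import numpy as np

cs = np.arange(0.1, 4.43, 0.11)   # 0.1, 0.21000000000000002, 0.32000000000000006, ...
cs_clean = np.round(cs, 2)        # 0.1, 0.21, 0.32, ... if exact grid labels are wanted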
@@ -1 +0,0 @@
{"duration": 2253.491542816162, "input_args": {"XData": " Pregnan Glucose BloodPress SkinThick Insulin BMI DPF Age\n0 8 183 64 0 0 23.3 0.672 32\n1 2 197 70 45 543 30.5 0.158 53\n2 8 125 96 0 0 0.0 0.232 54\n3 10 168 74 0 0 38.0 0.537 34\n4 1 189 60 23 846 30.1 0.398 59\n.. ... ... ... ... ... ... ... ...\n763 9 89 62 0 0 22.5 0.142 33\n764 2 122 70 27 0 36.8 0.340 27\n765 10 101 76 48 180 32.9 0.171 63\n766 5 121 72 23 112 26.2 0.245 30\n767 1 93 70 31 0 30.4 0.315 23\n\n[768 rows x 8 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "KNeighborsClassifier(algorithm='ball_tree', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=24, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], 'weights': ['uniform', 'distance'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -0,0 +1 @@
{"duration": 852.4920110702515, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "GradientBoostingClassifier(ccp_alpha=0.0, criterion='mae', init=None,\n learning_rate=0.23, loss='deviance', max_depth=3,\n max_features=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=109,\n n_iter_no_change=None, presort='deprecated',\n random_state=None, subsample=1.0, tol=0.0001,\n validation_fraction=0.1, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109], 'learning_rate': [0.01, 0.12, 0.23], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GPC'", "AlgorithmsIDsEnd": "3356"}}

@@ -0,0 +1 @@
{"duration": 887.560515165329, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "GradientBoostingClassifier(ccp_alpha=0.0, criterion='mae', init=None,\n learning_rate=0.23, loss='deviance', max_depth=3,\n max_features=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=109,\n n_iter_no_change=None, presort='deprecated',\n random_state=None, subsample=1.0, tol=0.0001,\n validation_fraction=0.1, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109], 'learning_rate': [0.01, 0.12, 0.23], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'BaggingClassifier'", "AlgorithmsIDsEnd": "3356"}}

@@ -1 +0,0 @@
{"duration": 1371.6013679504395, "input_args": {"XData": " Pregnan Glucose BloodPress SkinThick Insulin BMI DPF Age\n0 8 183 64 0 0 23.3 0.672 32\n1 2 197 70 45 543 30.5 0.158 53\n2 8 125 96 0 0 0.0 0.232 54\n3 10 168 74 0 0 38.0 0.537 34\n4 1 189 60 23 846 30.1 0.398 59\n.. ... ... ... ... ... ... ... ...\n763 9 89 62 0 0 22.5 0.142 33\n764 2 122 70 27 0 36.8 0.340 27\n765 10 101 76 48 180 32.9 0.171 63\n766 5 121 72 23 112 26.2 0.245 30\n767 1 93 70 31 0 30.4 0.315 23\n\n[768 rows x 8 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "clf": "RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=119,\n n_jobs=None, oob_score=False, random_state=None,\n verbose=0, warm_start=False)", "params": "{'n_estimators': [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 
110, 111, 112, 113, 114, 115, 116, 117, 118, 119], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "576"}}

@@ -0,0 +1 @@
{"duration": 34.42876172065735, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=0.99,\n solver='eigen', store_covariance=False, tol=0.0001)", "params": "{'shrinkage': [0.0, 0.018, 0.036, 0.05399999999999999, 0.072, 0.09, 0.10799999999999998, 0.126, 0.144, 0.16199999999999998, 0.18, 0.19799999999999998, 0.21599999999999997, 0.23399999999999999, 0.252, 0.26999999999999996, 0.288, 0.306, 0.32399999999999995, 0.34199999999999997, 0.36, 0.37799999999999995, 0.39599999999999996, 0.414, 0.43199999999999994, 0.44999999999999996, 0.46799999999999997, 0.486, 0.504, 0.5219999999999999, 0.5399999999999999, 0.5579999999999999, 0.576, 0.594, 0.612, 0.63, 0.6479999999999999, 0.6659999999999999, 0.6839999999999999, 0.702, 0.72, 0.738, 0.7559999999999999, 0.7739999999999999, 0.7919999999999999, 0.8099999999999999, 0.828, 0.846, 0.8639999999999999, 0.8819999999999999, 0.8999999999999999, 0.9179999999999999, 0.9359999999999999, 0.954, 0.972, 0.9899999999999999], 'solver': ['lsqr', 'eigen']}", "eachAlgor": "'LDA'", "AlgorithmsIDsEnd": "2536"}}

File diff suppressed because one or more lines are too long

@@ -0,0 +1 @@
{"duration": 316.6607279777527, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "KNeighborsClassifier(algorithm='ball_tree', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=24, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

@@ -0,0 +1 @@
{"duration": 703.3166279792786, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None,\n verbose=0, warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "2876"}}

@@ -1,4 +1,4 @@
# first line: 466
# first line: 510
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
@@ -78,6 +78,7 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 10
@@ -117,9 +118,13 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
resultsLogLoss.append(log_loss(yData, yPredict, normalize = True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
@@ -139,7 +144,7 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLoss)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()

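The func_code.py hunks above show the cached function's two changes: log loss is now computed on the hard predictions (yPredict) instead of the probability estimates, and the collected values are min-max normalized into resultsLogLossFinal, which is what the 'log_loss' column now stores. A worked example of the added normalization, with hypothetical raw values:

resultsLogLoss = [0.42, 1.97, 0.08]   # hypothetical raw log-loss values
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
resultsLogLossFinal = [(each - minLog) / (maxLog - minLog) for each in resultsLogLoss]
# -> [0.18, 1.0, 0.0] (to two decimals); every value now lies in [0, 1],
#    so a frontend can score it directly as 1 - value

Because the backend now ships log loss pre-normalized, the per-component max/min scans in the Vue files below become redundant and are deleted.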
@@ -35,7 +35,7 @@ export default {
PCPView () {
d3.selectAll("#PCP > *").remove();
if (this.selAlgorithm != '') {
var colors = ['#8dd3c7','#8da0cb']
var colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928']
var colorGiv = 0
var factorsLocal = this.factors
@@ -47,21 +47,6 @@ export default {
var Mc1 = []
const performanceAlg1 = JSON.parse(this.ModelsPerformance[6])
var max
var min
for (let j = 0; j < Object.values(performanceAlg1['mean_test_accuracy']).length; j++) {
if (j == 0) {
max = Object.values(performanceAlg1['log_loss'])[j]
min = Object.values(performanceAlg1['log_loss'])[j]
}
if (Object.values(performanceAlg1['log_loss'])[j] > max) {
max = Object.values(performanceAlg1['log_loss'])[j]
}
if (Object.values(performanceAlg1['log_loss'])[j] < min) {
min = Object.values(performanceAlg1['log_loss'])[j]
}
}
for (let j = 0; j < Object.values(performanceAlg1['mean_test_accuracy']).length; j++) {
let sum
@@ -69,7 +54,7 @@ export default {
+ (factorsLocal[5] * Object.values(performanceAlg1['geometric_mean_score_weighted'])[j]) + (factorsLocal[6] * Object.values(performanceAlg1['mean_test_precision_micro'])[j]) + (factorsLocal[7] * Object.values(performanceAlg1['mean_test_precision_macro'])[j]) + (factorsLocal[8] * Object.values(performanceAlg1['mean_test_precision_weighted'])[j]) + (factorsLocal[9] * Object.values(performanceAlg1['mean_test_recall_micro'])[j])
+ (factorsLocal[10] * Object.values(performanceAlg1['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlg1['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlg1['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlg1['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlg1['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlg1['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlg1['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlg1['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlg1['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlg1['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlg1['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlg1['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlg1['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - ((max - Object.values(performanceAlg1['log_loss'])[j])/(max - min))))
+ (factorsLocal[22] * Object.values(performanceAlg1['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlg1['log_loss'])[j]))
Mc1.push((sum/divide)*100)
}
@@ -81,7 +66,7 @@ export default {
+ (factorsLocal[5] * Object.values(performanceAlg2['geometric_mean_score_weighted'])[j]) + (factorsLocal[6] * Object.values(performanceAlg2['mean_test_precision_micro'])[j]) + (factorsLocal[7] * Object.values(performanceAlg2['mean_test_precision_macro'])[j]) + (factorsLocal[8] * Object.values(performanceAlg2['mean_test_precision_weighted'])[j]) + (factorsLocal[9] * Object.values(performanceAlg2['mean_test_recall_micro'])[j])
+ (factorsLocal[10] * Object.values(performanceAlg2['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlg2['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlg2['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlg2['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlg2['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlg2['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlg2['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlg2['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlg2['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlg2['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlg2['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlg2['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlg2['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - ((max - Object.values(performanceAlg2['log_loss'])[j])/(max - min))))
+ (factorsLocal[22] * Object.values(performanceAlg2['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlg2['log_loss'])[j]))
Mc2.push((sum2/divide)*100)
}
@@ -103,10 +88,10 @@ export default {
for (var i = 0; i < valuesPerf.length; i++) {
if (this.selAlgorithm === 'KNN') {
// There is a problem here!
newObjectsParams.push({model: i,'perf_metrics': Mc1[i],'n_neighbors':ObjectsParams[i].n_neighbors,'metric':ObjectsParams[i].metric,'algorithm':ObjectsParams[i].algorithm,'weights':ObjectsParams[i].weights})
newObjectsParams.push({model: i,'performance (%)': Mc1[i],'n_neighbors':ObjectsParams[i].n_neighbors,'metric':ObjectsParams[i].metric,'algorithm':ObjectsParams[i].algorithm,'weights':ObjectsParams[i].weights})
ArrayCombined[i] = newObjectsParams[i]
} else {
newObjectsParams2.push({model: this.KNNModels + i,'perf_metrics': Mc2[i],'n_estimators':ObjectsParams[i].n_estimators,'criterion':ObjectsParams[i].criterion})
newObjectsParams2.push({model: this.KNNModels + i,'performance (%)': Mc2[i],'n_estimators':ObjectsParams[i].n_estimators,'criterion':ObjectsParams[i].criterion})
ArrayCombined[i] = newObjectsParams2[i]
}
}

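With that scan gone, each model's score in AlgorithmHyperParam.vue reduces to a user-weighted average of the 24 metrics, all mapped into [0, 1] (log loss entering as 1 - value), scaled to a percentage. Recomputed here in Python for brevity; the weights and metric values are hypothetical:

factors = [1] * 24                  # one user-set weight per metric
metric_values = [0.9] * 24          # every metric already mapped into [0, 1]
divide = sum(factors)
score = sum(f * m for f, m in zip(factors, metric_values)) / divide * 100   # -> 90.0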
@@ -58,24 +58,8 @@ export default {
divide = element + divide
});
var max
var min
var Mc1 = []
const performanceAlg1 = JSON.parse(this.PerformanceAllModels[6])
console.log(performanceAlg1)
for (let j = 0; j < Object.values(performanceAlg1['mean_test_accuracy']).length; j++) {
if (j == 0) {
max = Object.values(performanceAlg1['log_loss'])[j]
min = Object.values(performanceAlg1['log_loss'])[j]
}
if (Object.values(performanceAlg1['log_loss'])[j] > max) {
max = Object.values(performanceAlg1['log_loss'])[j]
}
if (Object.values(performanceAlg1['log_loss'])[j] < min) {
min = Object.values(performanceAlg1['log_loss'])[j]
}
}
for (let j = 0; j < Object.values(performanceAlg1['mean_test_accuracy']).length; j++) {
let sum
@@ -83,7 +67,7 @@ export default {
+ (factorsLocal[5] * Object.values(performanceAlg1['geometric_mean_score_weighted'])[j]) + (factorsLocal[6] * Object.values(performanceAlg1['mean_test_precision_micro'])[j]) + (factorsLocal[7] * Object.values(performanceAlg1['mean_test_precision_macro'])[j]) + (factorsLocal[8] * Object.values(performanceAlg1['mean_test_precision_weighted'])[j]) + (factorsLocal[9] * Object.values(performanceAlg1['mean_test_recall_micro'])[j])
+ (factorsLocal[10] * Object.values(performanceAlg1['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlg1['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlg1['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlg1['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlg1['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlg1['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlg1['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlg1['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlg1['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlg1['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlg1['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlg1['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlg1['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - ((max - Object.values(performanceAlg1['log_loss'])[j])/(max - min))))
+ (factorsLocal[22] * Object.values(performanceAlg1['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlg1['log_loss'])[j]))
Mc1.push((sum/divide)*100)
}
@@ -95,7 +79,7 @@ export default {
+ (factorsLocal[5] * Object.values(performanceAlg2['geometric_mean_score_weighted'])[j]) + (factorsLocal[6] * Object.values(performanceAlg2['mean_test_precision_micro'])[j]) + (factorsLocal[7] * Object.values(performanceAlg2['mean_test_precision_macro'])[j]) + (factorsLocal[8] * Object.values(performanceAlg2['mean_test_precision_weighted'])[j]) + (factorsLocal[9] * Object.values(performanceAlg2['mean_test_recall_micro'])[j])
+ (factorsLocal[10] * Object.values(performanceAlg2['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlg2['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlg2['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlg2['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlg2['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlg2['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlg2['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlg2['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlg2['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlg2['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlg2['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlg2['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlg2['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - ((max - Object.values(performanceAlg2['log_loss'])[j])/(max - min))))
+ (factorsLocal[22] * Object.values(performanceAlg2['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlg2['log_loss'])[j]))
Mc2.push((sum2/divide)*100)
}
@@ -134,7 +118,7 @@ export default {
this.chart('#exploding_boxplot')
// colorscale
const previousColor = ['#8dd3c7','#8da0cb']
const previousColor = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928']
// check for brushing
var el = document.getElementsByClassName('d3-exploding-boxplot boxcontent')
var overall = document.getElementsByClassName('overall')
@@ -199,7 +183,7 @@ export default {
var limiter = this.chart.returnBrush()
var algorithm = []
const previousColor = ['#8dd3c7','#8da0cb']
const previousColor = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928']
var modelsActive = []
for (var j = 0; j < this.AllAlgorithms.length; j++) {
algorithm = []
@@ -267,7 +251,7 @@ export default {
} else {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point RF')
}
const previousColor = ['#8dd3c7','#8da0cb']
const previousColor = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928']
var modelsActive = []
for (let j = 0; j < this.brushedBoxPl.length; j++) {
modelsActive.push(this.brushedBoxPl[j].model)

@@ -20,7 +20,7 @@ export default {
modelsSelectedinBar: [],
factors: [1,1,1,1,1],
KNNModels: 576, //KNN models,
colorsValues: ['#6a3d9a','#b15928','#e31a1c'],
colorsValues: ['#b3e2cd','#fdcdac','#cbd5e8','#f4cae4','#e6f5c9','#fff2ae','#f1e2cc'],
WH: []
}
},

@@ -64,7 +64,7 @@ export default {
restoreData: 'Restore Step',
userSelectedFilter: 'mean',
responsiveWidthHeight: [],
colorsValues: ['#6a3d9a','#b15928','#e31a1c']
colorsValues: ['#b3e2cd','#fdcdac','#cbd5e8','#f4cae4','#e6f5c9','#fff2ae','#f1e2cc']
}
},
methods: {

@@ -144,7 +144,7 @@ export default {
text: '',
hoverinfo: 'text',
fill: "tozerox",
fillcolor: "rgba(55,126,184)",
fillcolor: "rgba(55,126,184,0)",
line: {color: "transparent"},
name: "Active Precision",
showlegend: false,

@@ -210,7 +210,7 @@ export default Vue.extend({
ClassifierIDsList: '',
SelectedFeaturesPerClassifier: '',
FinalResults: 0,
Algorithms: ['KNN','RF'],
Algorithms: ['GPC','KNN','SVC','GausNB','MLP','LR','LDA','QDA','RF','ExtraT','BaggingClassifier','AdaB','GradB'],
selectedAlgorithm: '',
PerformancePerModel: '',
PerformanceCheck: '',

@@ -455,7 +455,7 @@ export default {
//////////////////////////////////////////////////////////////
var color = d3.scale.ordinal()
.range(["#b3cde3","#fbb4ae"]);
.range(["#808000","#008080"]);
var radarChartOptions = {
w: width,

@@ -26,7 +26,7 @@ export default {
UpdatedData: '',
representationDef: 'mds',
representationSelection: 'mds',
colorsValues: ['#6a3d9a','#b15928','#e31a1c'],
colorsValues: ['#b3e2cd','#fdcdac','#cbd5e8','#f4cae4','#e6f5c9','#fff2ae','#f1e2cc'],
WH: []
}
},

@@ -85,7 +85,7 @@ export default {
let isotypes = Stardust.mark.create(isotype, this.platform);
let isotypeHeight = 18;
let colors = [[141,211,199], [141,160,203]];
let colors = [[166,206,227], [31,120,180], [178,223,138], [51,160,44], [251,154,153], [227,26,28], [253,191,111], [255,127,0], [202,178,214], [106,61,154], [255,255,153], [177,89,40]];
colors = colors.map(x => [x[0] / 255, x[1] / 255, x[2] / 255, 1]);
let pScale = Stardust.scale.custom(`

@@ -60,6 +60,7 @@ export default {
ScatterPlotView () {
Plotly.purge('OverviewPlotly')
var colorsforScatterPlot = JSON.parse(this.ScatterPlotResults[0])
console.log(colorsforScatterPlot)
var MDSData = JSON.parse(this.ScatterPlotResults[1])
var parameters = JSON.parse(this.ScatterPlotResults[2])
var TSNEData = JSON.parse(this.ScatterPlotResults[12])

113 run.py

@@ -15,11 +15,14 @@ from joblib import Memory
from itertools import chain
import ast
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from yellowbrick.regressor import CooksDistance
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier # 5 ensemble models
from sklearn.calibration import CalibratedClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
@@ -66,7 +69,7 @@ def Reset():
RANDOM_SEED = 42
global factors
factors = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
factors = [1,1,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global XData
XData = []
@@ -444,14 +447,55 @@ def RetrieveModel():
# loop through the algorithms
global allParametersPerformancePerModel
for eachAlgor in algorithms:
print(eachAlgor)
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'weights': ['uniform', 'distance'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski']}
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
else:
clf = RandomForestClassifier()
params = {'n_estimators': list(range(40, 120)), 'criterion': ['gini', 'entropy']}
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = 576
elif (eachAlgor) == 'GausNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000001))}
AlgorithmsIDsEnd = 736
elif (eachAlgor) == 'MLP':
clf = MLPClassifier()
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0005)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = 1736
elif (eachAlgor) == 'LR':
clf = LogisticRegression()
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = 1816
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.018)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = 2536
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(range(1, 50)), 'tol': list(np.arange(0.00001,0.001,0.0005))}
AlgorithmsIDsEnd = 2716
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier()
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = 2876
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier()
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = 3036
elif (eachAlgor) == 'BagC':
clf = BaggingClassifier()
params = {'n_estimators': list(range(90,110)), 'base_estimator': ['KNeighborsClassifier()', 'DummyClassifier()', 'DecisionTreeClassifier()', 'SVC()', 'BernoulliNB()', 'LogisticRegression()', 'Ridge()', 'Perceptron()', 'LDA()','QDA()']}
AlgorithmsIDsEnd = 1896
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier()
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = 3196
else:
clf = GradientBoostingClassifier()
params = {'n_estimators': list(range(90, 110)), 'learning_rate': list(np.arange(0.01,0.34,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = 3356
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd)
# call the function that sends the results to the frontend
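# Hedged sketch of what each (clf, params) pair dispatched above presumably
# feeds into: a scikit-learn grid search over the listed grid (cv=5 and the
# scoring setup are assumptions, not taken from this diff). AlgorithmsIDsEnd
# offsets each family's models into one global ID space: KNN's grid has
# 24 x 4 x 3 x 2 = 576 combinations, so SVC starts at 576, GausNB at 736,
# and so on up to GradB at 3356.
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(estimator=clf, param_grid=params, cv=5, n_jobs=-1)
grid.fit(XData, yData)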
@@ -542,6 +586,7 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 10
@@ -581,9 +626,13 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
resultsLogLoss.append(log_loss(yData, yPredict, normalize = True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
@@ -603,7 +652,7 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLoss)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
@@ -890,30 +939,52 @@ def preProcessFeatSc():
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
print(loopThroughMetrics)
for row in loopThroughMetrics.iterrows():
rowSum = 0
lengthFactors = len(scoring)
name, values = row
for loop, elements in enumerate(values):
lengthFactors = lengthFactors - 1 + factors[loop]
rowSum = elements*factors[loop] + rowSum
if lengthFactors is 0:
if sum(factors) is 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/lengthFactors)
sumPerClassifier.append(rowSum/sum(factors))
return sumPerClassifier
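# Worked example of the corrected denominator above: with factors = [1, 0, 1]
# and one row of metric values [0.8, 0.5, 0.6], only the weighted metrics
# count, so rowSum = 0.8*1 + 0.5*0 + 0.6*1 = 1.4 and the score is
# 1.4 / sum(factors) = 0.7. Dividing by sum(factors) keeps the result a
# proper weighted mean without the running lengthFactors bookkeeping.
factors = [1, 0, 1]
values = [0.8, 0.5, 0.6]
rowSum = sum(v * f for v, f in zip(values, factors))
score = rowSum / sum(factors) if sum(factors) != 0 else 0   # -> 0.7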
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'].sum()/loopThroughMetrics['mean_test_accuracy'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_mean_absolute_error'].sum()/loopThroughMetrics['mean_test_neg_mean_absolute_error'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_root_mean_squared_error'].sum()/loopThroughMetrics['mean_test_neg_root_mean_squared_error'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'].sum()/loopThroughMetrics['geometric_mean_score_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'].sum()/loopThroughMetrics['geometric_mean_score_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'].sum()/loopThroughMetrics['geometric_mean_score_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'].sum()/loopThroughMetrics['mean_test_precision_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_jaccard'].sum()/loopThroughMetrics['mean_test_jaccard'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'].sum()/loopThroughMetrics['mean_test_precision_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'].sum()/loopThroughMetrics['mean_test_precision_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'].sum()/loopThroughMetrics['mean_test_recall_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'].sum()/loopThroughMetrics['mean_test_recall_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'].sum()/loopThroughMetrics['mean_test_recall_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_micro'].sum()/loopThroughMetrics['f5_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_macro'].sum()/loopThroughMetrics['f5_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'].sum()/loopThroughMetrics['f5_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_micro'].sum()/loopThroughMetrics['f1_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_macro'].sum()/loopThroughMetrics['f1_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'].sum()/loopThroughMetrics['f1_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_micro'].sum()/loopThroughMetrics['f2_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_macro'].sum()/loopThroughMetrics['f2_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'].sum()/loopThroughMetrics['f2_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'].sum()/loopThroughMetrics['matthews_corrcoef'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'].sum()/loopThroughMetrics['mean_test_roc_auc_ovo_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['log_loss'].sum()/loopThroughMetrics['log_loss'].count())
for index, metric in enumerate(metricsPerModelColl):
metricsPerModelColl[index] = metric*factors[index]
if (index == 1 or index == 2):
metricsPerModelColl[index] = (metric + 1)*factors[index]
elif (index == 23):
metricsPerModelColl[index] = (1 - metric)*factors[index]
else:
metricsPerModelColl[index] = metric*factors[index]
return metricsPerModelColl
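# Hedged illustration of the rescaling branches added above: the averaged
# negative error metrics are shifted by +1 (landing in [0, 1] for errors
# below 1), and the min-max-normalized log loss is inverted, so every
# factor-weighted term points the same way: higher is better.
avg_neg_mae = -0.25            # hypothetical mean of a negated error metric
shifted = avg_neg_mae + 1      # -> 0.75
avg_log_loss = 0.2             # already min-max scaled into [0, 1] upstream
inverted = 1 - avg_log_loss    # -> 0.8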
def preProceModels():
@@ -932,7 +1003,7 @@ def FunTsne (data):
return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=5, random_state=RANDOM_SEED).fit(data)
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
