NEW UPDATES

master
parent b4cc0f94e0
commit 0047c1e567
  1. BIN
      __pycache__/run.cpython-37.pyc
  2. 700
      breast-cancer-wisconsin.csv
  3. 2
      cachedir/joblib/run/GridSearchForModels/03316dae023356b400b64061e59b4761/metadata.json
  4. BIN
      cachedir/joblib/run/GridSearchForModels/14b7a6af511f576047b7a4433376c746/output.pkl
  5. BIN
      cachedir/joblib/run/GridSearchForModels/173781222c1a10e0c1c932908218f2f0/output.pkl
  6. BIN
      cachedir/joblib/run/GridSearchForModels/1fe55cc0b30f8f663d1bc8b0b49c1cac/output.pkl
  7. BIN
      cachedir/joblib/run/GridSearchForModels/2c4edd3fe62fd40568ca7f7561419075/output.pkl
  8. 2
      cachedir/joblib/run/GridSearchForModels/3294c2d73e0b69b7e2c3488c95dbb85d/metadata.json
  9. 1
      cachedir/joblib/run/GridSearchForModels/37519dd894d05d61acea33c7629ad5df/metadata.json
  10. 1
      cachedir/joblib/run/GridSearchForModels/46b13a7726ed8eeee91902d7222493c3/metadata.json
  11. 1
      cachedir/joblib/run/GridSearchForModels/5d1c5fed33f062d128fe27abdff1a588/metadata.json
  12. 2
      cachedir/joblib/run/GridSearchForModels/74ee6e4e2d4e95a538931d6c565590f3/metadata.json
  13. BIN
      cachedir/joblib/run/GridSearchForModels/82f223a869481609fe37b291b6e15bc2/output.pkl
  14. BIN
      cachedir/joblib/run/GridSearchForModels/836902d529892c54aae1d4228d3c846f/output.pkl
  15. 1
      cachedir/joblib/run/GridSearchForModels/865dc4b6eb9ed2491a5fe265ff3a53c2/metadata.json
  16. 1
      cachedir/joblib/run/GridSearchForModels/9e693891c6d22c678734887be2a6282f/metadata.json
  17. 2
      cachedir/joblib/run/GridSearchForModels/a46dcd4e232a8dcae4536a2e2da671aa/metadata.json
  18. BIN
      cachedir/joblib/run/GridSearchForModels/a8cc5caea6831da1d9820dd1a74d5c71/output.pkl
  19. BIN
      cachedir/joblib/run/GridSearchForModels/aaf8008b46bc250a1ce9f54973d7ef9a/output.pkl
  20. 2
      cachedir/joblib/run/GridSearchForModels/ab217966ac94ecfcb1f16130f136aadd/metadata.json
  21. BIN
      cachedir/joblib/run/GridSearchForModels/ab221a8a839cf8aa754a8871de928b9f/output.pkl
  22. 2
      cachedir/joblib/run/GridSearchForModels/abcb3eec64dd8c6498d463980055e83b/metadata.json
  23. 1
      cachedir/joblib/run/GridSearchForModels/b26d47976b47db54818d974819f582b9/metadata.json
  24. BIN
      cachedir/joblib/run/GridSearchForModels/cf15d7acca337b9036c117dda0a7233d/output.pkl
  25. BIN
      cachedir/joblib/run/GridSearchForModels/db7bcaa025115a8c68aea12b37d63eed/output.pkl
  26. 1
      cachedir/joblib/run/GridSearchForModels/dc2413a05bbf24b34447fc94cb3166f4/metadata.json
  27. 1
      cachedir/joblib/run/GridSearchForModels/ec1b794b874ed8bd2511d49545431aa6/metadata.json
  28. 2
      cachedir/joblib/run/GridSearchForModels/f37dfc7f4090db9277c351e400a7579c/metadata.json
  29. 12
      cachedir/joblib/run/GridSearchForModels/func_code.py
  30. 769
      diabetes.csv
  31. 855
      frontend/package-lock.json
  32. 53
      frontend/package.json
  33. 73
      frontend/src/components/AlgorithmHyperParam.vue
  34. 114
      frontend/src/components/Algorithms.vue
  35. 12
      frontend/src/components/BalancePredictions.vue
  36. 63
      frontend/src/components/BarChart.vue
  37. 93
      frontend/src/components/DataSpace.vue
  38. 46
      frontend/src/components/Export.vue
  39. 3
      frontend/src/components/Heatmap.vue
  40. 49
      frontend/src/components/Main.vue
  41. 17
      frontend/src/components/PCPData.vue
  42. 5
      frontend/src/components/Parameters.vue
  43. 125
      frontend/src/components/PerMetricBarChart.vue
  44. 55
      frontend/src/components/PredictionsSpace.vue
  45. 25
      frontend/src/components/Provenance.vue
  46. 1
      frontend/src/components/ResetClass.vue
  47. 114
      frontend/src/components/ScatterPlot.vue
  48. 24
      insertMongo.py
  49. 152
      iris.csv
  50. 476
      run.py

Binary file not shown.

@@ -0,0 +1,700 @@
clump_thic,size_un,shape_un,marg_adh,epith_size,bare_nuc,bland_chr,nor_nuc,mitoses,class*
5,1,1,1,2,1,3,1,1,Benign
5,4,4,5,7,10,3,2,1,Benign
3,1,1,1,2,2,3,1,1,Benign
6,8,8,1,3,4,3,7,1,Benign
4,1,1,3,2,1,3,1,1,Benign
8,10,10,8,7,10,9,7,1,Malignant
1,1,1,1,2,10,3,1,1,Benign
2,1,2,1,2,1,3,1,1,Benign
2,1,1,1,2,1,1,1,5,Benign
4,2,1,1,2,1,2,1,1,Benign
1,1,1,1,1,1,3,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
5,3,3,3,2,3,4,4,1,Malignant
1,1,1,1,2,3,3,1,1,Benign
8,7,5,10,7,9,5,5,4,Malignant
7,4,6,4,6,1,4,3,1,Malignant
4,1,1,1,2,1,2,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
10,7,7,6,4,10,4,1,2,Malignant
6,1,1,1,2,1,3,1,1,Benign
7,3,2,10,5,10,5,4,4,Malignant
10,5,5,3,6,7,7,10,1,Malignant
3,1,1,1,2,1,2,1,1,Benign
8,4,5,1,2,4,7,3,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
5,2,3,4,2,7,3,6,1,Malignant
3,2,1,1,1,1,2,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
1,1,3,1,2,1,1,1,1,Benign
3,1,1,1,1,1,2,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
10,7,7,3,8,5,7,4,3,Malignant
2,1,1,2,2,1,3,1,1,Benign
3,1,2,1,2,1,2,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
10,10,10,8,6,1,8,9,1,Malignant
6,2,1,1,1,1,7,1,1,Benign
5,4,4,9,2,10,5,6,1,Malignant
2,5,3,3,6,7,7,5,1,Malignant
6,6,6,9,6,4,7,8,1,Benign
10,4,3,1,3,3,6,5,2,Malignant
6,10,10,2,8,10,7,3,3,Malignant
5,6,5,6,10,1,3,1,1,Malignant
10,10,10,4,8,1,8,10,1,Malignant
1,1,1,1,2,1,2,1,2,Benign
3,7,7,4,4,9,4,8,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
4,1,1,3,2,1,3,1,1,Benign
7,8,7,2,4,8,3,8,2,Malignant
9,5,8,1,2,3,2,1,5,Malignant
5,3,3,4,2,4,3,4,1,Malignant
10,3,6,2,3,5,4,10,2,Malignant
5,5,5,8,10,8,7,3,7,Malignant
10,5,5,6,8,8,7,1,1,Malignant
10,6,6,3,4,5,3,6,1,Malignant
8,10,10,1,3,6,3,9,1,Malignant
8,2,4,1,5,1,5,4,4,Malignant
5,2,3,1,6,10,5,1,1,Malignant
9,5,5,2,2,2,5,1,1,Malignant
5,3,5,5,3,3,4,10,1,Malignant
1,1,1,1,2,2,2,1,1,Benign
9,10,10,1,10,8,3,3,1,Malignant
6,3,4,1,5,2,3,9,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
10,4,2,1,3,2,4,3,10,Malignant
4,1,1,1,2,1,3,1,1,Benign
5,3,4,1,8,10,4,9,1,Malignant
8,3,8,3,4,9,8,9,8,Malignant
1,1,1,1,2,1,3,2,1,Benign
5,1,3,1,2,1,2,1,1,Benign
6,10,2,8,10,2,7,8,10,Malignant
1,3,3,2,2,1,7,2,1,Benign
9,4,5,10,6,10,4,8,1,Malignant
10,6,4,1,3,4,3,2,3,Malignant
1,1,2,1,2,2,4,2,1,Benign
1,1,4,1,2,1,2,1,1,Benign
5,3,1,2,2,1,2,1,1,Benign
3,1,1,1,2,3,3,1,1,Benign
2,1,1,1,3,1,2,1,1,Benign
2,2,2,1,1,1,7,1,1,Benign
4,1,1,2,2,1,2,1,1,Benign
5,2,1,1,2,1,3,1,1,Benign
3,1,1,1,2,2,7,1,1,Benign
3,5,7,8,8,9,7,10,7,Malignant
5,10,6,1,10,4,4,10,10,Malignant
3,3,6,4,5,8,4,4,1,Malignant
3,6,6,6,5,10,6,8,3,Malignant
4,1,1,1,2,1,3,1,1,Benign
2,1,1,2,3,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
3,1,1,2,2,1,1,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
2,1,1,2,2,1,1,1,1,Benign
5,1,1,1,2,1,3,1,1,Benign
9,6,9,2,10,6,2,9,10,Malignant
7,5,6,10,5,10,7,9,4,Malignant
10,3,5,1,10,5,3,10,2,Malignant
2,3,4,4,2,5,2,5,1,Malignant
4,1,2,1,2,1,3,1,1,Benign
8,2,3,1,6,3,7,1,1,Malignant
10,10,10,10,10,1,8,8,8,Malignant
7,3,4,4,3,3,3,2,7,Malignant
10,10,10,8,2,10,4,1,1,Malignant
1,6,8,10,8,10,5,7,1,Malignant
1,1,1,1,2,1,2,3,1,Benign
6,5,4,4,3,9,7,8,3,Malignant
1,3,1,2,2,2,5,3,2,Benign
8,6,4,3,5,9,3,1,1,Malignant
10,3,3,10,2,10,7,3,3,Malignant
10,10,10,3,10,8,8,1,1,Malignant
3,3,2,1,2,3,3,1,1,Benign
1,1,1,1,2,5,1,1,1,Benign
8,3,3,1,2,2,3,2,1,Benign
4,5,5,10,4,10,7,5,8,Malignant
1,1,1,1,4,3,1,1,1,Benign
3,2,1,1,2,2,3,1,1,Benign
1,1,2,2,2,1,3,1,1,Benign
4,2,1,1,2,2,3,1,1,Benign
10,10,10,2,10,10,5,3,3,Malignant
5,3,5,1,8,10,5,3,1,Malignant
5,4,6,7,9,7,8,10,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
7,5,3,7,4,10,7,5,5,Malignant
3,1,1,1,2,1,3,1,1,Benign
8,3,5,4,5,10,1,6,2,Malignant
1,1,1,1,10,1,1,1,1,Benign
5,1,3,1,2,1,2,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
5,10,8,10,8,10,3,6,3,Malignant
3,1,1,1,2,1,2,2,1,Benign
3,1,1,1,3,1,2,1,1,Benign
5,1,1,1,2,2,3,3,1,Benign
4,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
4,1,2,1,2,1,2,1,1,Benign
1,1,1,1,1,4,2,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
9,5,5,4,4,5,4,3,3,Malignant
1,1,1,1,2,5,1,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
1,1,3,1,2,4,2,1,1,Benign
3,4,5,2,6,8,4,1,1,Malignant
1,1,1,1,3,2,2,1,1,Benign
3,1,1,3,8,1,5,8,1,Benign
8,8,7,4,10,10,7,8,7,Malignant
1,1,1,1,1,1,3,1,1,Benign
7,2,4,1,6,10,5,4,3,Malignant
10,10,8,6,4,5,8,10,1,Malignant
4,1,1,1,2,3,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,5,5,6,3,10,3,1,1,Malignant
1,2,2,1,2,1,2,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
1,1,2,1,3,4,1,1,1,Benign
9,9,10,3,6,10,7,10,6,Malignant
10,7,7,4,5,10,5,7,2,Malignant
4,1,1,1,2,1,3,2,1,Benign
3,1,1,1,2,1,3,1,1,Benign
1,1,1,2,1,3,1,1,7,Benign
5,1,1,1,2,4,3,1,1,Benign
4,1,1,1,2,2,3,2,1,Benign
5,6,7,8,8,10,3,10,3,Malignant
10,8,10,10,6,1,3,1,10,Malignant
3,1,1,1,2,1,3,1,1,Benign
1,1,1,2,1,1,1,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
6,10,10,10,8,10,10,10,7,Malignant
8,6,5,4,3,10,6,1,1,Malignant
5,8,7,7,10,10,5,7,1,Malignant
2,1,1,1,2,1,3,1,1,Benign
5,10,10,3,8,1,5,10,3,Malignant
4,1,1,1,2,1,3,1,1,Benign
5,3,3,3,6,10,3,1,1,Malignant
1,1,1,1,1,1,3,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
6,1,1,1,2,1,3,1,1,Benign
5,8,8,8,5,10,7,8,1,Malignant
8,7,6,4,4,10,5,1,1,Malignant
2,1,1,1,1,1,3,1,1,Benign
1,5,8,6,5,8,7,10,1,Malignant
10,5,6,10,6,10,7,7,10,Malignant
5,8,4,10,5,8,9,10,1,Malignant
1,2,3,1,2,1,3,1,1,Benign
10,10,10,8,6,8,7,10,1,Malignant
7,5,10,10,10,10,4,10,3,Malignant
5,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
3,1,1,1,2,1,3,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
8,4,4,5,4,7,7,8,2,Benign
5,1,1,4,2,1,3,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
9,7,7,5,5,10,7,8,3,Malignant
10,8,8,4,10,10,8,1,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
5,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
5,10,10,9,6,10,7,10,5,Malignant
10,10,9,3,7,5,3,5,1,Malignant
1,1,1,1,1,1,3,1,1,Benign
1,1,1,1,1,1,3,1,1,Benign
5,1,1,1,1,1,3,1,1,Benign
8,10,10,10,5,10,8,10,6,Malignant
8,10,8,8,4,8,7,7,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
10,10,10,10,7,10,7,10,4,Malignant
10,10,10,10,3,10,10,6,1,Malignant
8,7,8,7,5,5,5,10,2,Malignant
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
6,10,7,7,6,4,8,10,2,Malignant
6,1,3,1,2,1,3,1,1,Benign
1,1,1,2,2,1,3,1,1,Benign
10,6,4,3,10,10,9,10,1,Malignant
4,1,1,3,1,5,2,1,1,Malignant
7,5,6,3,3,8,7,4,1,Malignant
10,5,5,6,3,10,7,9,2,Malignant
1,1,1,1,2,1,2,1,1,Benign
10,5,7,4,4,10,8,9,1,Malignant
8,9,9,5,3,5,7,7,1,Malignant
1,1,1,1,1,1,3,1,1,Benign
10,10,10,3,10,10,9,10,1,Malignant
7,4,7,4,3,7,7,6,1,Malignant
6,8,7,5,6,8,8,9,2,Malignant
8,4,6,3,3,1,4,3,1,Benign
10,4,5,5,5,10,4,1,1,Malignant
3,3,2,1,3,1,3,6,1,Benign
3,1,4,1,2,4,3,1,1,Benign
10,8,8,2,8,10,4,8,10,Malignant
9,8,8,5,6,2,4,10,4,Malignant
8,10,10,8,6,9,3,10,10,Malignant
10,4,3,2,3,10,5,3,2,Malignant
5,1,3,3,2,2,2,3,1,Benign
3,1,1,3,1,1,3,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,5,5,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
5,1,1,2,2,2,3,1,1,Benign
8,10,10,8,5,10,7,8,1,Malignant
8,4,4,1,2,9,3,3,1,Malignant
4,1,1,1,2,1,3,6,1,Benign
3,1,1,1,2,4,3,1,1,Benign
1,2,2,1,2,1,1,1,1,Benign
10,4,4,10,2,10,5,3,3,Malignant
6,3,3,5,3,10,3,5,3,Benign
6,10,10,2,8,10,7,3,3,Malignant
9,10,10,1,10,8,3,3,1,Malignant
5,6,6,2,4,10,3,6,1,Malignant
3,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,3,1,1,Benign
5,7,7,1,5,8,3,4,1,Benign
10,5,8,10,3,10,5,1,3,Malignant
5,10,10,6,10,10,10,6,5,Malignant
8,8,9,4,5,10,7,8,1,Malignant
10,4,4,10,6,10,5,5,1,Malignant
7,9,4,10,10,3,5,3,3,Malignant
5,1,4,1,2,1,3,2,1,Benign
10,10,6,3,3,10,4,3,2,Malignant
3,3,5,2,3,10,7,1,1,Malignant
10,8,8,2,3,4,8,7,8,Malignant
1,1,1,1,2,1,3,1,1,Benign
8,4,7,1,3,10,3,9,2,Malignant
5,1,1,1,2,1,3,1,1,Benign
3,3,5,2,3,10,7,1,1,Malignant
7,2,4,1,3,4,3,3,1,Malignant
3,1,1,1,2,1,3,2,1,Benign
3,1,3,1,2,4,2,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
10,5,7,3,3,7,3,3,8,Malignant
3,1,1,1,2,1,3,1,1,Benign
2,1,1,2,2,1,3,1,1,Benign
1,4,3,10,4,10,5,6,1,Malignant
10,4,6,1,2,10,5,3,1,Malignant
7,4,5,10,2,10,3,8,2,Malignant
8,10,10,10,8,10,10,7,3,Malignant
10,10,10,10,10,10,4,10,10,Malignant
3,1,1,1,3,1,2,1,1,Benign
6,1,3,1,4,5,5,10,1,Malignant
5,6,6,8,6,10,4,10,4,Malignant
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
8,8,8,1,2,4,6,10,1,Malignant
10,4,4,6,2,10,2,3,1,Malignant
1,1,1,1,2,4,2,1,1,Benign
5,5,7,8,6,10,7,4,1,Malignant
5,3,4,3,4,5,4,7,1,Benign
5,4,3,1,2,4,2,3,1,Benign
8,2,1,1,5,1,1,1,1,Benign
9,1,2,6,4,10,7,7,2,Malignant
8,4,10,5,4,4,7,10,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
10,10,10,7,9,10,7,10,10,Malignant
1,1,1,1,2,1,3,1,1,Benign
8,3,4,9,3,10,3,3,1,Malignant
10,8,4,4,4,10,3,10,4,Malignant
1,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
7,8,7,6,4,3,8,8,4,Malignant
3,1,1,1,2,5,5,1,1,Benign
2,1,1,1,3,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
8,6,4,10,10,1,3,5,1,Malignant
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,1,1,2,1,1,Benign
4,6,5,6,7,4,4,9,1,Benign
5,5,5,2,5,10,4,3,1,Malignant
6,8,7,8,6,8,8,9,1,Malignant
1,1,1,1,5,1,3,1,1,Benign
4,4,4,4,6,5,7,3,1,Benign
7,6,3,2,5,10,7,4,6,Malignant
3,1,1,1,2,4,3,1,1,Benign
3,1,1,1,2,1,3,1,1,Benign
5,4,6,10,2,10,4,1,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
3,2,2,1,2,1,2,3,1,Benign
10,1,1,1,2,10,5,4,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
8,10,3,2,6,4,3,10,1,Malignant
10,4,6,4,5,10,7,1,1,Malignant
10,4,7,2,2,8,6,1,1,Malignant
5,1,1,1,2,1,3,1,2,Benign
5,2,2,2,2,1,2,2,1,Benign
5,4,6,6,4,10,4,3,1,Malignant
8,6,7,3,3,10,3,4,2,Malignant
1,1,1,1,2,1,1,1,1,Benign
6,5,5,8,4,10,3,4,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
1,1,1,1,1,1,2,1,1,Benign
8,5,5,5,2,10,4,3,1,Malignant
10,3,3,1,2,10,7,6,1,Malignant
1,1,1,1,2,1,3,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
7,6,4,8,10,10,9,5,3,Malignant
1,1,1,1,2,1,1,1,1,Benign
5,2,2,2,3,1,1,3,1,Benign
1,1,1,1,1,1,1,3,1,Benign
3,4,4,10,5,1,3,3,1,Malignant
4,2,3,5,3,8,7,6,1,Malignant
5,1,1,3,2,1,1,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
3,4,5,3,7,3,4,6,1,Benign
2,7,10,10,7,10,4,9,4,Malignant
1,1,1,1,2,1,2,1,1,Benign
4,1,1,1,3,1,2,2,1,Benign
5,3,3,1,3,3,3,3,3,Malignant
8,10,10,7,10,10,7,3,8,Malignant
8,10,5,3,8,4,4,10,3,Malignant
10,3,5,4,3,7,3,5,3,Malignant
6,10,10,10,10,10,8,10,10,Malignant
3,10,3,10,6,10,5,1,4,Malignant
3,2,2,1,4,3,2,1,1,Benign
4,4,4,2,2,3,2,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
6,10,10,10,8,10,7,10,7,Malignant
5,8,8,10,5,10,8,10,3,Malignant
1,1,3,1,2,1,1,1,1,Benign
1,1,3,1,1,1,2,1,1,Benign
4,3,2,1,3,1,2,1,1,Benign
1,1,3,1,2,1,1,1,1,Benign
4,1,2,1,2,1,2,1,1,Benign
5,1,1,2,2,1,2,1,1,Benign
3,1,2,1,2,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,1,1,2,1,1,Benign
3,1,1,4,3,1,2,2,1,Benign
5,3,4,1,4,1,3,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
10,6,3,6,4,10,7,8,4,Malignant
3,2,2,2,2,1,3,2,1,Benign
2,1,1,1,2,1,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
3,3,2,2,3,1,1,2,3,Benign
7,6,6,3,2,10,7,1,1,Malignant
5,3,3,2,3,1,3,1,1,Benign
2,1,1,1,2,1,2,2,1,Benign
5,1,1,1,3,2,2,2,1,Benign
1,1,1,2,2,1,2,1,1,Benign
10,8,7,4,3,10,7,9,1,Malignant
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,1,1,1,1,1,Benign
1,2,3,1,2,1,2,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,3,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
3,2,1,1,2,1,2,2,1,Benign
1,2,3,1,2,1,1,1,1,Benign
3,10,8,7,6,9,9,3,8,Malignant
3,1,1,1,2,1,1,1,1,Benign
5,3,3,1,2,1,2,1,1,Benign
3,1,1,1,2,4,1,1,1,Benign
1,2,1,3,2,1,1,2,1,Benign
1,1,1,1,2,1,2,1,1,Benign
4,2,2,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
2,3,2,2,2,2,3,1,1,Benign
3,1,2,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,1,4,2,1,1,Benign
10,10,10,6,8,4,8,5,1,Malignant
5,1,2,1,2,1,3,1,1,Benign
8,5,6,2,3,10,6,6,1,Malignant
3,3,2,6,3,3,3,5,1,Benign
8,7,8,5,10,10,7,2,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
5,2,2,2,2,2,3,2,2,Benign
2,3,1,1,5,1,1,1,1,Benign
3,2,2,3,2,3,3,1,1,Benign
10,10,10,7,10,10,8,2,1,Malignant
4,3,3,1,2,1,3,3,1,Benign
5,1,3,1,2,1,2,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
9,10,10,10,10,10,10,10,1,Malignant
5,3,6,1,2,1,1,1,1,Benign
8,7,8,2,4,2,5,10,1,Malignant
1,1,1,1,2,1,2,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
1,3,1,1,2,1,2,2,1,Benign
5,1,1,3,4,1,3,2,1,Benign
5,1,1,1,2,1,2,2,1,Benign
3,2,2,3,2,1,1,1,1,Benign
6,9,7,5,5,8,4,2,1,Benign
10,8,10,1,3,10,5,1,1,Malignant
10,10,10,1,6,1,2,8,1,Malignant
4,1,1,1,2,1,1,1,1,Benign
4,1,3,3,2,1,1,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
10,4,3,10,4,10,10,1,1,Malignant
5,2,2,4,2,4,1,1,1,Benign
1,1,1,3,2,3,1,1,1,Benign
1,1,1,1,2,2,1,1,1,Benign
5,1,1,6,3,1,2,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
1,1,1,1,1,1,1,1,1,Benign
5,7,9,8,6,10,8,10,1,Malignant
4,1,1,3,1,1,2,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
3,1,1,3,2,1,1,1,1,Benign
4,5,5,8,6,10,10,7,1,Malignant
2,3,1,1,3,1,1,1,1,Benign
10,2,2,1,2,6,1,1,2,Malignant
10,6,5,8,5,10,8,6,1,Malignant
8,8,9,6,6,3,10,10,1,Malignant
5,1,2,1,2,1,1,1,1,Benign
5,1,3,1,2,1,1,1,1,Benign
5,1,1,3,2,1,1,1,1,Benign
3,1,1,1,2,5,1,1,1,Benign
6,1,1,3,2,1,1,1,1,Benign
4,1,1,1,2,1,1,2,1,Benign
4,1,1,1,2,1,1,1,1,Benign
10,9,8,7,6,4,7,10,3,Malignant
10,6,6,2,4,10,9,7,1,Malignant
6,6,6,5,4,10,7,6,2,Malignant
4,1,1,1,2,1,1,1,1,Benign
1,1,2,1,2,1,2,1,1,Benign
3,1,1,1,1,1,2,1,1,Benign
6,1,1,3,2,1,1,1,1,Benign
6,1,1,1,1,1,1,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
4,1,2,1,2,1,1,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
5,2,1,1,2,1,1,1,1,Benign
4,8,7,10,4,10,7,5,1,Malignant
5,1,1,1,1,1,1,1,1,Benign
5,3,2,4,2,1,1,1,1,Benign
9,10,10,10,10,5,10,10,10,Malignant
8,7,8,5,5,10,9,10,1,Malignant
5,1,2,1,2,1,1,1,1,Benign
1,1,1,3,1,3,1,1,1,Benign
3,1,1,1,1,1,2,1,1,Benign
10,10,10,10,6,10,8,1,5,Malignant
3,6,4,10,3,3,3,4,1,Malignant
6,3,2,1,3,4,4,1,1,Malignant
1,1,1,1,2,1,1,1,1,Benign
5,8,9,4,3,10,7,1,1,Malignant
4,1,1,1,1,1,2,1,1,Benign
5,10,10,10,6,10,6,5,2,Malignant
5,1,2,10,4,5,2,1,1,Benign
3,1,1,1,1,1,2,1,1,Benign
1,1,1,1,1,1,1,1,1,Benign
4,2,1,1,2,1,1,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
6,1,1,1,2,1,3,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
4,1,1,2,2,1,2,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
3,3,1,1,2,1,1,1,1,Benign
8,10,10,10,7,5,4,8,7,Malignant
1,1,1,1,2,4,1,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
3,1,1,1,1,1,2,1,1,Benign
6,6,7,10,3,10,8,10,2,Malignant
4,10,4,7,3,10,9,10,1,Malignant
1,1,1,1,1,1,1,1,1,Benign
1,1,1,1,1,1,2,1,1,Benign
3,1,2,2,2,1,1,1,1,Benign
4,7,8,3,4,10,9,1,1,Malignant
1,1,1,1,3,1,1,1,1,Benign
4,1,1,1,3,1,1,1,1,Benign
10,4,5,4,3,5,7,3,1,Malignant
7,5,6,10,4,10,5,3,1,Malignant
3,1,1,1,2,1,2,1,1,Benign
3,1,1,2,2,1,1,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
6,1,3,2,2,1,1,1,1,Benign
4,1,1,1,1,1,2,1,1,Benign
7,4,4,3,4,10,6,9,1,Malignant
4,2,2,1,2,1,2,1,1,Benign
1,1,1,1,1,1,3,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
1,1,3,2,2,1,3,1,1,Benign
5,1,1,1,2,1,3,1,1,Benign
5,1,2,1,2,1,3,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
6,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,2,2,1,1,Benign
3,1,1,1,2,1,1,1,1,Benign
5,3,1,1,2,1,1,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
2,1,3,2,2,1,2,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
6,10,10,10,4,10,7,10,1,Malignant
2,1,1,1,1,1,1,1,1,Benign
3,1,1,1,1,1,1,1,1,Benign
7,8,3,7,4,5,7,8,2,Malignant
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
3,2,2,2,2,1,4,2,1,Benign
4,4,2,1,2,5,2,1,2,Benign
3,1,1,1,2,1,1,1,1,Benign
4,3,1,1,2,1,4,8,1,Benign
5,2,2,2,1,1,2,1,1,Benign
5,1,1,3,2,1,1,1,1,Benign
2,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,1,3,1,1,Benign
5,1,1,1,2,1,3,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
4,1,1,1,2,1,3,2,1,Benign
5,7,10,10,5,10,10,10,1,Malignant
3,1,2,1,2,1,3,1,1,Benign
4,1,1,1,2,3,2,1,1,Benign
8,4,4,1,6,10,2,5,2,Malignant
10,10,8,10,6,5,10,3,1,Malignant
8,10,4,4,8,10,8,2,1,Malignant
7,6,10,5,3,10,9,10,2,Malignant
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
10,9,7,3,4,2,7,7,1,Malignant
5,1,2,1,2,1,3,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,3,1,1,Benign
5,1,2,1,2,1,2,1,1,Benign
5,7,10,6,5,10,7,5,1,Malignant
6,10,5,5,4,10,6,10,1,Malignant
3,1,1,1,2,1,1,1,1,Benign
5,1,1,6,3,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
8,10,10,10,6,10,10,10,1,Malignant
5,1,1,1,2,1,2,2,1,Benign
9,8,8,9,6,3,4,1,1,Malignant
5,1,1,1,2,1,1,1,1,Benign
4,10,8,5,4,1,10,1,1,Malignant
2,5,7,6,4,10,7,6,1,Malignant
10,3,4,5,3,10,4,1,1,Malignant
5,1,2,1,2,1,1,1,1,Benign
4,8,6,3,4,10,7,1,1,Malignant
5,1,1,1,2,1,2,1,1,Benign
4,1,2,1,2,1,2,1,1,Benign
5,1,3,1,2,1,3,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
5,2,4,1,1,1,1,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,1,1,2,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
5,4,6,8,4,1,8,10,1,Malignant
5,3,2,8,5,10,8,1,2,Malignant
10,5,10,3,5,8,7,8,3,Malignant
4,1,1,2,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,10,10,10,10,10,10,1,1,Malignant
5,1,1,1,2,1,1,1,1,Benign
10,4,3,10,3,10,7,1,2,Malignant
5,10,10,10,5,2,8,5,1,Malignant
8,10,10,10,6,10,10,10,10,Malignant
2,3,1,1,2,1,2,1,1,Benign
2,1,1,1,1,1,2,1,1,Benign
4,1,3,1,2,1,2,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,1,4,1,1,1,Benign
4,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
6,3,3,3,3,2,6,1,1,Benign
7,1,2,3,2,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,1,1,2,1,1,2,1,1,Benign
3,1,3,1,3,4,1,1,1,Benign
4,6,6,5,7,6,7,7,3,Malignant
2,1,1,1,2,5,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
6,2,3,1,2,1,1,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
8,7,4,4,5,3,5,10,1,Malignant
3,1,1,1,2,1,1,1,1,Benign
3,1,4,1,2,1,1,1,1,Benign
10,10,7,8,7,1,10,10,3,Malignant
4,2,4,3,2,2,2,1,1,Benign
4,1,1,1,2,1,1,1,1,Benign
5,1,1,3,2,1,1,1,1,Benign
4,1,1,3,2,1,1,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
1,2,2,1,2,1,1,1,1,Benign
1,1,1,3,2,1,1,1,1,Benign
5,10,10,10,10,2,10,10,10,Malignant
3,1,1,1,2,1,2,1,1,Benign
3,1,1,2,3,4,1,1,1,Benign
1,2,1,3,2,1,2,1,1,Benign
5,1,1,1,2,1,2,2,1,Benign
4,1,1,1,2,1,2,1,1,Benign
3,1,1,1,2,1,3,1,1,Benign
3,1,1,1,2,1,2,1,1,Benign
5,1,1,1,2,1,2,1,1,Benign
5,4,5,1,8,1,3,6,1,Benign
7,8,8,7,3,10,7,2,3,Malignant
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
4,1,1,1,2,1,3,1,1,Benign
1,1,3,1,2,1,2,1,1,Benign
1,1,3,1,2,1,2,1,1,Benign
3,1,1,3,2,1,2,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
5,2,2,2,2,1,1,1,2,Benign
3,1,1,1,2,1,3,1,1,Benign
5,7,4,1,6,1,7,10,3,Malignant
5,10,10,8,5,5,7,10,1,Malignant
3,10,7,8,5,8,7,4,1,Malignant
3,2,1,2,2,1,3,1,1,Benign
2,1,1,1,2,1,3,1,1,Benign
5,3,2,1,3,1,1,1,1,Benign
1,1,1,1,2,1,2,1,1,Benign
4,1,4,1,2,1,1,1,1,Benign
1,1,2,1,2,1,2,1,1,Benign
5,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
10,10,10,10,5,10,10,10,7,Malignant
5,10,10,10,4,10,5,6,3,Malignant
5,1,1,1,2,1,3,2,1,Benign
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,2,3,1,Benign
4,1,1,1,2,1,1,1,1,Benign
1,1,1,1,2,1,1,1,8,Benign
1,1,1,3,2,1,1,1,1,Benign
5,10,10,5,4,5,4,4,1,Malignant
3,1,1,1,2,1,1,1,1,Benign
3,1,1,1,2,1,2,1,2,Benign
3,1,1,1,3,2,1,1,1,Benign
2,1,1,1,2,1,1,1,1,Benign
5,10,10,3,7,3,8,10,2,Malignant
4,8,6,4,3,4,10,6,1,Malignant
4,8,8,5,4,5,10,4,1,Malignant
unable to load file from base commit
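The breast-cancer-wisconsin.csv file added above contains nine integer features scored 1-10 plus a class label; the trailing asterisk in the header (class*) appears to mark the target column. A minimal loading sketch, assuming pandas and only the file and column names visible in this diff:

# Sketch only: load the dataset added in this commit and split features from the target.
import pandas as pd

df = pd.read_csv('breast-cancer-wisconsin.csv')
target_col = next(c for c in df.columns if c.endswith('*'))  # 'class*' in the header above
X = df.drop(columns=[target_col])
y = df[target_col].map({'Benign': 0, 'Malignant': 1})  # encode labels for scikit-learn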

@@ -1 +1 @@
{"duration": 54.30657696723938, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "QuadraticDiscriminantAnalysis(priors=None, reg_param=50, store_covariance=False,\n tol=0.00081)", "params": "{'reg_param': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50], 'tol': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001]}", "eachAlgor": "'QDA'", "AlgorithmsIDsEnd": "2196"}}
{"duration": 75.22324180603027, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "QuadraticDiscriminantAnalysis(priors=None, reg_param=50, store_covariance=False,\n tol=0.00081)", "params": "{'reg_param': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50], 'tol': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001]}", "eachAlgor": "'QDA'", "AlgorithmsIDsEnd": "2196"}}

@@ -1 +1 @@
{"duration": 806.5329508781433, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "GradientBoostingClassifier(ccp_alpha=0.0, criterion='mae', init=None,\n learning_rate=0.12, loss='deviance', max_depth=3,\n max_features=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=114,\n n_iter_no_change=None, presort='deprecated',\n random_state=None, subsample=1.0, tol=0.0001,\n validation_fraction=0.1, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114], 'learning_rate': [0.01, 0.12], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GradB'", "AlgorithmsIDsEnd": "2926"}}
{"duration": 934.0173509120941, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "GradientBoostingClassifier(ccp_alpha=0.0, criterion='mae', init=None,\n learning_rate=0.12, loss='deviance', max_depth=3,\n max_features=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=114,\n n_iter_no_change=None, presort='deprecated',\n random_state=None, subsample=1.0, tol=0.0001,\n validation_fraction=0.1, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114], 'learning_rate': [0.01, 0.12], 'criterion': ['friedman_mse', 'mse', 'mae']}", "eachAlgor": "'GradB'", "AlgorithmsIDsEnd": "2926"}}

@@ -1 +0,0 @@
{"duration": 222.3701467514038, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LogisticRegression(C=1.925, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=200,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=None, solver='saga', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [0.5, 0.575, 0.6499999999999999, 0.7249999999999999, 0.7999999999999998, 0.8749999999999998, 0.9499999999999997, 1.0249999999999997, 1.0999999999999996, 1.1749999999999996, 1.2499999999999996, 1.3249999999999995, 1.3999999999999995, 1.4749999999999994, 1.5499999999999994, 1.6249999999999993, 1.6999999999999993, 1.7749999999999992, 1.8499999999999992, 1.9249999999999992], 'max_iter': [50, 100, 150, 200], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "1356"}}

@@ -1 +0,0 @@
{"duration": 465.37524604797363, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "SVC(C=4.39, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='scale', kernel='sigmoid',\n max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001,\n verbose=False)", "params": "{'C': [0.1, 0.21000000000000002, 0.32000000000000006, 0.43000000000000005, 0.54, 0.65, 0.7600000000000001, 0.8700000000000001, 0.9800000000000001, 1.09, 1.2000000000000002, 1.3100000000000003, 1.4200000000000004, 1.5300000000000002, 1.6400000000000003, 1.7500000000000002, 1.8600000000000003, 1.9700000000000004, 2.08, 2.1900000000000004, 2.3000000000000003, 2.4100000000000006, 2.5200000000000005, 2.6300000000000003, 2.7400000000000007, 2.8500000000000005, 2.9600000000000004, 3.0700000000000003, 3.1800000000000006, 3.2900000000000005, 3.4000000000000004, 3.5100000000000007, 3.6200000000000006, 3.7300000000000004, 3.8400000000000007, 3.9500000000000006, 4.0600000000000005, 4.17, 4.28, 4.390000000000001], 'kernel': ['rbf', 'linear', 'poly', 'sigmoid']}", "eachAlgor": "'SVC'", "AlgorithmsIDsEnd": "576"}}

@@ -1 +0,0 @@
{"duration": 153.8552680015564, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "MLPClassifier(activation='tanh', alpha=0.00081, batch_size='auto', beta_1=0.9,\n beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant',\n learning_rate_init=0.001, max_fun=15000, max_iter=100,\n momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,\n power_t=0.5, random_state=None, shuffle=True, solver='sgd',\n tol=0.00081, validation_fraction=0.1, verbose=False,\n warm_start=False)", "params": "{'alpha': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001], 'tol': [1e-05, 0.00041000000000000005, 0.0008100000000000001], 'max_iter': [100], 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver': ['adam', 'sgd']}", "eachAlgor": "'MLP'", "AlgorithmsIDsEnd": "1236"}}

@@ -1 +1 @@
{"duration": 604.6331548690796, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None,\n verbose=0, warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "2446"}}
{"duration": 779.0136120319366, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None,\n verbose=0, warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'RF'", "AlgorithmsIDsEnd": "2446"}}

@@ -1 +0,0 @@
{"duration": 505.02412009239197, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'ExtraT'", "AlgorithmsIDsEnd": "2606"}}

@@ -0,0 +1 @@
{"duration": 476.436359167099, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "AdaBoostClassifier(algorithm='SAMME', base_estimator=None, learning_rate=1.2,\n n_estimators=79, random_state=None)", "params": "{'n_estimators': [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79], 'learning_rate': [0.1, 1.2000000000000002], 'algorithm': ['SAMME.R', 'SAMME']}", "eachAlgor": "'AdaB'", "AlgorithmsIDsEnd": "2766"}}

@@ -1 +1 @@
{"duration": 43.723469972610474, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=0.99,\n solver='eigen', store_covariance=False, tol=0.0001)", "params": "{'shrinkage': [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35000000000000003, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41000000000000003, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47000000000000003, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.5700000000000001, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.6900000000000001, 0.7000000000000001, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.8200000000000001, 0.8300000000000001, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.9400000000000001, 0.9500000000000001, 0.96, 0.97, 0.98, 0.99], 'solver': ['lsqr', 'eigen']}", "eachAlgor": "'LDA'", "AlgorithmsIDsEnd": "1996"}}
{"duration": 65.16387891769409, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=0.99,\n solver='eigen', store_covariance=False, tol=0.0001)", "params": "{'shrinkage': [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35000000000000003, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41000000000000003, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47000000000000003, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.5700000000000001, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.6900000000000001, 0.7000000000000001, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.8200000000000001, 0.8300000000000001, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.9400000000000001, 0.9500000000000001, 0.96, 0.97, 0.98, 0.99], 'solver': ['lsqr', 'eigen']}", "eachAlgor": "'LDA'", "AlgorithmsIDsEnd": "1996"}}

@@ -1 +1 @@
{"duration": 307.30965399742126, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "KNeighborsClassifier(algorithm='ball_tree', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=24, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}
{"duration": 373.67042565345764, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "KNeighborsClassifier(algorithm='ball_tree', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=24, p=2,\n weights='distance')", "params": "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}", "eachAlgor": "'KNN'", "AlgorithmsIDsEnd": "0"}}

File diff suppressed because one or more lines are too long

@@ -0,0 +1 @@
{"duration": 286.56338810920715, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "LogisticRegression(C=1.925, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=200,\n multi_class='auto', n_jobs=None, penalty='none',\n random_state=None, solver='saga', tol=0.0001, verbose=0,\n warm_start=False)", "params": "{'C': [0.5, 0.575, 0.6499999999999999, 0.7249999999999999, 0.7999999999999998, 0.8749999999999998, 0.9499999999999997, 1.0249999999999997, 1.0999999999999996, 1.1749999999999996, 1.2499999999999996, 1.3249999999999995, 1.3999999999999995, 1.4749999999999994, 1.5499999999999994, 1.6249999999999993, 1.6999999999999993, 1.7749999999999992, 1.8499999999999992, 1.9249999999999992], 'max_iter': [50, 100, 150, 200], 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}", "eachAlgor": "'LR'", "AlgorithmsIDsEnd": "1356"}}

@@ -0,0 +1 @@
{"duration": 515.3227281570435, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "SVC(C=4.39, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='scale', kernel='sigmoid',\n max_iter=-1, probability=True, random_state=None, shrinking=True, tol=0.001,\n verbose=False)", "params": "{'C': [0.1, 0.21000000000000002, 0.32000000000000006, 0.43000000000000005, 0.54, 0.65, 0.7600000000000001, 0.8700000000000001, 0.9800000000000001, 1.09, 1.2000000000000002, 1.3100000000000003, 1.4200000000000004, 1.5300000000000002, 1.6400000000000003, 1.7500000000000002, 1.8600000000000003, 1.9700000000000004, 2.08, 2.1900000000000004, 2.3000000000000003, 2.4100000000000006, 2.5200000000000005, 2.6300000000000003, 2.7400000000000007, 2.8500000000000005, 2.9600000000000004, 3.0700000000000003, 3.1800000000000006, 3.2900000000000005, 3.4000000000000004, 3.5100000000000007, 3.6200000000000006, 3.7300000000000004, 3.8400000000000007, 3.9500000000000006, 4.0600000000000005, 4.17, 4.28, 4.390000000000001], 'kernel': ['rbf', 'linear', 'poly', 'sigmoid']}", "eachAlgor": "'SVC'", "AlgorithmsIDsEnd": "576"}}

@@ -0,0 +1 @@
{"duration": 610.0474598407745, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,\n criterion='entropy', max_depth=None, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=139,\n n_jobs=None, oob_score=False, random_state=None, verbose=0,\n warm_start=False)", "params": "{'n_estimators': [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139], 'criterion': ['gini', 'entropy']}", "eachAlgor": "'ExtraT'", "AlgorithmsIDsEnd": "2606"}}

@@ -1 +1 @@
{"duration": 396.95492720603943, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "AdaBoostClassifier(algorithm='SAMME', base_estimator=None, learning_rate=1.2,\n n_estimators=79, random_state=None)", "params": "{'n_estimators': [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79], 'learning_rate': [0.1, 1.2000000000000002], 'algorithm': ['SAMME.R', 'SAMME']}", "eachAlgor": "'AdaB'", "AlgorithmsIDsEnd": "2766"}}
{"duration": 243.32867527008057, "input_args": {"XData": " sepal_l sepal_w petal_l petal_w\n0 6.3 3.3 6.0 2.5\n1 7.1 3.0 5.9 2.1\n2 5.8 2.7 5.1 1.9\n3 6.3 2.9 5.6 1.8\n4 7.6 3.0 6.6 2.1\n.. ... ... ... ...\n145 5.1 3.8 1.6 0.2\n146 5.0 3.5 1.6 0.6\n147 5.1 3.4 1.5 0.2\n148 4.6 3.2 1.4 0.2\n149 4.8 3.0 1.4 0.3\n\n[150 rows x 4 columns]", "yData": "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]", "clf": "MLPClassifier(activation='tanh', alpha=0.00081, batch_size='auto', beta_1=0.9,\n beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant',\n learning_rate_init=0.001, max_fun=15000, max_iter=100,\n momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,\n power_t=0.5, random_state=None, shuffle=True, solver='sgd',\n tol=0.00081, validation_fraction=0.1, verbose=False,\n warm_start=False)", "params": "{'alpha': [1e-05, 0.00021, 0.00041000000000000005, 0.0006100000000000001, 0.0008100000000000001], 'tol': [1e-05, 0.00041000000000000005, 0.0008100000000000001], 'max_iter': [100], 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver': ['adam', 'sgd']}", "eachAlgor": "'MLP'", "AlgorithmsIDsEnd": "1236"}}

@@ -1,7 +1,7 @@
# first line: 542
# first line: 556
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
    print('test')
    print('start')
    # instantiate spark session
    spark = (
        SparkSession
@@ -82,6 +82,12 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
    loop = 10
    # influence calculation for all the instances
    inputs = range(len(XData))
    num_cores = multiprocessing.cpu_count()
    impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
    for eachModelParameters in parametersLocalNew:
        clf.set_params(**eachModelParameters)
@@ -177,8 +183,10 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
    results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
    results.append(perm_imp_eli5PD) # Position: 4 and so on
    results.append(featureScores) # Position: 5 and so on
    metrics = metrics.clip(lower=0)
    metrics = metrics.to_json()
    results.append(metrics) # Position: 6 and so on
    results.append(perModelProbPandas) # Position: 7 and so on
    results.append(json.dumps(impDataInst)) # Position: 8 and so on
    return results
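The hunks above show the cached source of GridSearchForModels (mirroring run.py) changing in two ways: a per-instance influence score is now computed in parallel across all CPU cores with joblib's Parallel/delayed before the per-model loop, and the collected metrics are clipped to non-negative values before being serialized to JSON and appended to the results list. A hedged sketch of that pattern, with processInput replaced by a hypothetical leave-one-out cross-validation scorer (the project's actual scorer may differ):

# Sketch only: parallel per-instance influence plus non-negative metric serialization.
import multiprocessing
import pandas as pd
from joblib import Parallel, delayed
from sklearn.base import clone
from sklearn.model_selection import cross_val_score

def process_input(i, XData, yData, crossValidation, clf):
    # Hypothetical influence measure: mean CV accuracy with instance i left out.
    X_rest = XData.drop(index=XData.index[i])
    y_rest = [label for j, label in enumerate(yData) if j != i]
    return cross_val_score(clone(clf), X_rest, y_rest, cv=crossValidation).mean()

def influence_per_instance(XData, yData, clf, crossValidation=5):
    num_cores = multiprocessing.cpu_count()
    scores = Parallel(n_jobs=num_cores)(
        delayed(process_input)(i, XData, yData, crossValidation, clf)
        for i in range(len(XData))
    )
    # Mirror metrics.clip(lower=0) followed by to_json() from the hunk above.
    return pd.Series(scores).clip(lower=0).to_json()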

@@ -0,0 +1,769 @@
Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DPF,Age,Outcome*
6,148,72,35,0,33.6,0.627,50,Positive
1,85,66,29,0,26.6,0.351,31,Negative
8,183,64,0,0,23.3,0.672,32,Positive
1,89,66,23,94,28.1,0.167,21,Negative
0,137,40,35,168,43.1,2.288,33,Positive
5,116,74,0,0,25.6,0.201,30,Negative
3,78,50,32,88,31,0.248,26,Positive
10,115,0,0,0,35.3,0.134,29,Negative
2,197,70,45,543,30.5,0.158,53,Positive
8,125,96,0,0,0,0.232,54,Positive
4,110,92,0,0,37.6,0.191,30,Negative
10,168,74,0,0,38,0.537,34,Positive
10,139,80,0,0,27.1,1.441,57,Negative
1,189,60,23,846,30.1,0.398,59,Positive
5,166,72,19,175,25.8,0.587,51,Positive
7,100,0,0,0,30,0.484,32,Positive
0,118,84,47,230,45.8,0.551,31,Positive
7,107,74,0,0,29.6,0.254,31,Positive
1,103,30,38,83,43.3,0.183,33,Negative
1,115,70,30,96,34.6,0.529,32,Positive
3,126,88,41,235,39.3,0.704,27,Negative
8,99,84,0,0,35.4,0.388,50,Negative
7,196,90,0,0,39.8,0.451,41,Positive
9,119,80,35,0,29,0.263,29,Positive
11,143,94,33,146,36.6,0.254,51,Positive
10,125,70,26,115,31.1,0.205,41,Positive
7,147,76,0,0,39.4,0.257,43,Positive
1,97,66,15,140,23.2,0.487,22,Negative
13,145,82,19,110,22.2,0.245,57,Negative
5,117,92,0,0,34.1,0.337,38,Negative
5,109,75,26,0,36,0.546,60,Negative
3,158,76,36,245,31.6,0.851,28,Positive
3,88,58,11,54,24.8,0.267,22,Negative
6,92,92,0,0,19.9,0.188,28,Negative
10,122,78,31,0,27.6,0.512,45,Negative
4,103,60,33,192,24,0.966,33,Negative
11,138,76,0,0,33.2,0.42,35,Negative
9,102,76,37,0,32.9,0.665,46,Positive
2,90,68,42,0,38.2,0.503,27,Positive
4,111,72,47,207,37.1,1.39,56,Positive
3,180,64,25,70,34,0.271,26,Negative
7,133,84,0,0,40.2,0.696,37,Negative
7,106,92,18,0,22.7,0.235,48,Negative
9,171,110,24,240,45.4,0.721,54,Positive
7,159,64,0,0,27.4,0.294,40,Negative
0,180,66,39,0,42,1.893,25,Positive
1,146,56,0,0,29.7,0.564,29,Negative
2,71,70,27,0,28,0.586,22,Negative
7,103,66,32,0,39.1,0.344,31,Positive
7,105,0,0,0,0,0.305,24,Negative
1,103,80,11,82,19.4,0.491,22,Negative
1,101,50,15,36,24.2,0.526,26,Negative
5,88,66,21,23,24.4,0.342,30,Negative
8,176,90,34,300,33.7,0.467,58,Positive
7,150,66,42,342,34.7,0.718,42,Negative
1,73,50,10,0,23,0.248,21,Negative
7,187,68,39,304,37.7,0.254,41,Positive
0,100,88,60,110,46.8,0.962,31,Negative
0,146,82,0,0,40.5,1.781,44,Negative
0,105,64,41,142,41.5,0.173,22,Negative
2,84,0,0,0,0,0.304,21,Negative
8,133,72,0,0,32.9,0.27,39,Positive
5,44,62,0,0,25,0.587,36,Negative
2,141,58,34,128,25.4,0.699,24,Negative
7,114,66,0,0,32.8,0.258,42,Positive
5,99,74,27,0,29,0.203,32,Negative
0,109,88,30,0,32.5,0.855,38,Positive
2,109,92,0,0,42.7,0.845,54,Negative
1,95,66,13,38,19.6,0.334,25,Negative
4,146,85,27,100,28.9,0.189,27,Negative
2,100,66,20,90,32.9,0.867,28,Positive
5,139,64,35,140,28.6,0.411,26,Negative
13,126,90,0,0,43.4,0.583,42,Positive
4,129,86,20,270,35.1,0.231,23,Negative
1,79,75,30,0,32,0.396,22,Negative
1,0,48,20,0,24.7,0.14,22,Negative
7,62,78,0,0,32.6,0.391,41,Negative
5,95,72,33,0,37.7,0.37,27,Negative
0,131,0,0,0,43.2,0.27,26,Positive
2,112,66,22,0,25,0.307,24,Negative
3,113,44,13,0,22.4,0.14,22,Negative
2,74,0,0,0,0,0.102,22,Negative
7,83,78,26,71,29.3,0.767,36,Negative
0,101,65,28,0,24.6,0.237,22,Negative
5,137,108,0,0,48.8,0.227,37,Positive
2,110,74,29,125,32.4,0.698,27,Negative
13,106,72,54,0,36.6,0.178,45,Negative
2,100,68,25,71,38.5,0.324,26,Negative
15,136,70,32,110,37.1,0.153,43,Positive
1,107,68,19,0,26.5,0.165,24,Negative
1,80,55,0,0,19.1,0.258,21,Negative
4,123,80,15,176,32,0.443,34,Negative
7,81,78,40,48,46.7,0.261,42,Negative
4,134,72,0,0,23.8,0.277,60,Positive
2,142,82,18,64,24.7,0.761,21,Negative
6,144,72,27,228,33.9,0.255,40,Negative
2,92,62,28,0,31.6,0.13,24,Negative
1,71,48,18,76,20.4,0.323,22,Negative
6,93,50,30,64,28.7,0.356,23,Negative
1,122,90,51,220,49.7,0.325,31,Positive
1,163,72,0,0,39,1.222,33,Positive
1,151,60,0,0,26.1,0.179,22,Negative
0,125,96,0,0,22.5,0.262,21,Negative
1,81,72,18,40,26.6,0.283,24,Negative
2,85,65,0,0,39.6,0.93,27,Negative
1,126,56,29,152,28.7,0.801,21,Negative
1,96,122,0,0,22.4,0.207,27,Negative
4,144,58,28,140,29.5,0.287,37,Negative
3,83,58,31,18,34.3,0.336,25,Negative
0,95,85,25,36,37.4,0.247,24,Positive
3,171,72,33,135,33.3,0.199,24,Positive
8,155,62,26,495,34,0.543,46,Positive
1,89,76,34,37,31.2,0.192,23,Negative
4,76,62,0,0,34,0.391,25,Negative
7,160,54,32,175,30.5,0.588,39,Positive
4,146,92,0,0,31.2,0.539,61,Positive
5,124,74,0,0,34,0.22,38,Positive
5,78,48,0,0,33.7,0.654,25,Negative
4,97,60,23,0,28.2,0.443,22,Negative
4,99,76,15,51,23.2,0.223,21,Negative
0,162,76,56,100,53.2,0.759,25,Positive
6,111,64,39,0,34.2,0.26,24,Negative
2,107,74,30,100,33.6,0.404,23,Negative
5,132,80,0,0,26.8,0.186,69,Negative
0,113,76,0,0,33.3,0.278,23,Positive
1,88,30,42,99,55,0.496,26,Positive
3,120,70,30,135,42.9,0.452,30,Negative
1,118,58,36,94,33.3,0.261,23,Negative
1,117,88,24,145,34.5,0.403,40,Positive
0,105,84,0,0,27.9,0.741,62,Positive
4,173,70,14,168,29.7,0.361,33,Positive
9,122,56,0,0,33.3,1.114,33,Positive
3,170,64,37,225,34.5,0.356,30,Positive
8,84,74,31,0,38.3,0.457,39,Negative
2,96,68,13,49,21.1,0.647,26,Negative
2,125,60,20,140,33.8,0.088,31,Negative
0,100,70,26,50,30.8,0.597,21,Negative
0,93,60,25,92,28.7,0.532,22,Negative
0,129,80,0,0,31.2,0.703,29,Negative
5,105,72,29,325,36.9,0.159,28,Negative
3,128,78,0,0,21.1,0.268,55,Negative
5,106,82,30,0,39.5,0.286,38,Negative
2,108,52,26,63,32.5,0.318,22,Negative
10,108,66,0,0,32.4,0.272,42,Positive
4,154,62,31,284,32.8,0.237,23,Negative
0,102,75,23,0,0,0.572,21,Negative
9,57,80,37,0,32.8,0.096,41,Negative
2,106,64,35,119,30.5,1.4,34,Negative
5,147,78,0,0,33.7,0.218,65,Negative
2,90,70,17,0,27.3,0.085,22,Negative
1,136,74,50,204,37.4,0.399,24,Negative
4,114,65,0,0,21.9,0.432,37,Negative
9,156,86,28,155,34.3,1.189,42,Positive
1,153,82,42,485,40.6,0.687,23,Negative
8,188,78,0,0,47.9,0.137,43,Positive
7,152,88,44,0,50,0.337,36,Positive
2,99,52,15,94,24.6,0.637,21,Negative
1,109,56,21,135,25.2,0.833,23,Negative
2,88,74,19,53,29,0.229,22,Negative
17,163,72,41,114,40.9,0.817,47,Positive
4,151,90,38,0,29.7,0.294,36,Negative
7,102,74,40,105,37.2,0.204,45,Negative
0,114,80,34,285,44.2,0.167,27,Negative
2,100,64,23,0,29.7,0.368,21,Negative
0,131,88,0,0,31.6,0.743,32,Positive
6,104,74,18,156,29.9,0.722,41,Positive
3,148,66,25,0,32.5,0.256,22,Negative
4,120,68,0,0,29.6,0.709,34,Negative
4,110,66,0,0,31.9,0.471,29,Negative
3,111,90,12,78,28.4,0.495,29,Negative
6,102,82,0,0,30.8,0.18,36,Positive
6,134,70,23,130,35.4,0.542,29,Positive
2,87,0,23,0,28.9,0.773,25,Negative
1,79,60,42,48,43.5,0.678,23,Negative
2,75,64,24,55,29.7,0.37,33,Negative
8,179,72,42,130,32.7,0.719,36,Positive
6,85,78,0,0,31.2,0.382,42,Negative
0,129,110,46,130,67.1,0.319,26,Positive
5,143,78,0,0,45,0.19,47,Negative
5,130,82,0,0,39.1,0.956,37,Positive
6,87,80,0,0,23.2,0.084,32,Negative
0,119,64,18,92,34.9,0.725,23,Negative
1,0,74,20,23,27.7,0.299,21,Negative
5,73,60,0,0,26.8,0.268,27,Negative
4,141,74,0,0,27.6,0.244,40,Negative
7,194,68,28,0,35.9,0.745,41,Positive
8,181,68,36,495,30.1,0.615,60,Positive
1,128,98,41,58,32,1.321,33,Positive
8,109,76,39,114,27.9,0.64,31,Positive
5,139,80,35,160,31.6,0.361,25,Positive
3,111,62,0,0,22.6,0.142,21,Negative
9,123,70,44,94,33.1,0.374,40,Negative
7,159,66,0,0,30.4,0.383,36,Positive
11,135,0,0,0,52.3,0.578,40,Positive
8,85,55,20,0,24.4,0.136,42,Negative
5,158,84,41,210,39.4,0.395,29,Positive
1,105,58,0,0,24.3,0.187,21,Negative
3,107,62,13,48,22.9,0.678,23,Positive
4,109,64,44,99,34.8,0.905,26,Positive
4,148,60,27,318,30.9,0.15,29,Positive
0,113,80,16,0,31,0.874,21,Negative
1,138,82,0,0,40.1,0.236,28,Negative
0,108,68,20,0,27.3,0.787,32,Negative
2,99,70,16,44,20.4,0.235,27,Negative
6,103,72,32,190,37.7,0.324,55,Negative
5,111,72,28,0,23.9,0.407,27,Negative
8,196,76,29,280,37.5,0.605,57,Positive
5,162,104,0,0,37.7,0.151,52,Positive
1,96,64,27,87,33.2,0.289,21,Negative
7,184,84,33,0,35.5,0.355,41,Positive
2,81,60,22,0,27.7,0.29,25,Negative
0,147,85,54,0,42.8,0.375,24,Negative
7,179,95,31,0,34.2,0.164,60,Negative
0,140,65,26,130,42.6,0.431,24,Positive
9,112,82,32,175,34.2,0.26,36,Positive
12,151,70,40,271,41.8,0.742,38,Positive
5,109,62,41,129,35.8,0.514,25,Positive
6,125,68,30,120,30,0.464,32,Negative
5,85,74,22,0,29,1.224,32,Positive
5,112,66,0,0,37.8,0.261,41,Positive
0,177,60,29,478,34.6,1.072,21,Positive
2,158,90,0,0,31.6,0.805,66,Positive
7,119,0,0,0,25.2,0.209,37,Negative
7,142,60,33,190,28.8,0.687,61,Negative
1,100,66,15,56,23.6,0.666,26,Negative
1,87,78,27,32,34.6,0.101,22,Negative
0,101,76,0,0,35.7,0.198,26,Negative
3,162,52,38,0,37.2,0.652,24,Positive
4,197,70,39,744,36.7,2.329,31,Negative
0,117,80,31,53,45.2,0.089,24,Negative
4,142,86,0,0,44,0.645,22,Positive
6,134,80,37,370,46.2,0.238,46,Positive
1,79,80,25,37,25.4,0.583,22,Negative
4,122,68,0,0,35,0.394,29,Negative
3,74,68,28,45,29.7,0.293,23,Negative
4,171,72,0,0,43.6,0.479,26,Positive
7,181,84,21,192,35.9,0.586,51,Positive
0,179,90,27,0,44.1,0.686,23,Positive
9,164,84,21,0,30.8,0.831,32,Positive
0,104,76,0,0,18.4,0.582,27,Negative
1,91,64,24,0,29.2,0.192,21,Negative
4,91,70,32,88,33.1,0.446,22,Negative
3,139,54,0,0,25.6,0.402,22,Positive
6,119,50,22,176,27.1,1.318,33,Positive
2,146,76,35,194,38.2,0.329,29,Negative
9,184,85,15,0,30,1.213,49,Positive
10,122,68,0,0,31.2,0.258,41,Negative
0,165,90,33,680,52.3,0.427,23,Negative
9,124,70,33,402,35.4,0.282,34,Negative
1,111,86,19,0,30.1,0.143,23,Negative
9,106,52,0,0,31.2,0.38,42,Negative
2,129,84,0,0,28,0.284,27,Negative
2,90,80,14,55,24.4,0.249,24,Negative
0,86,68,32,0,35.8,0.238,25,Negative
12,92,62,7,258,27.6,0.926,44,Positive
1,113,64,35,0,33.6,0.543,21,Positive
3,111,56,39,0,30.1,0.557,30,Negative
2,114,68,22,0,28.7,0.092,25,Negative
1,193,50,16,375,25.9,0.655,24,Negative
11,155,76,28,150,33.3,1.353,51,Positive
3,191,68,15,130,30.9,0.299,34,Negative
3,141,0,0,0,30,0.761,27,Positive
4,95,70,32,0,32.1,0.612,24,Negative
3,142,80,15,0,32.4,0.2,63,Negative
4,123,62,0,0,32,0.226,35,Positive
5,96,74,18,67,33.6,0.997,43,Negative
0,138,0,0,0,36.3,0.933,25,Positive
2,128,64,42,0,40,1.101,24,Negative
0,102,52,0,0,25.1,0.078,21,Negative
2,146,0,0,0,27.5,0.24,28,Positive
10,101,86,37,0,45.6,1.136,38,Positive
2,108,62,32,56,25.2,0.128,21,Negative
3,122,78,0,0,23,0.254,40,Negative
1,71,78,50,45,33.2,0.422,21,Negative
13,106,70,0,0,34.2,0.251,52,Negative
2,100,70,52,57,40.5,0.677,25,Negative
7,106,60,24,0,26.5,0.296,29,Positive
0,104,64,23,116,27.8,0.454,23,Negative
5,114,74,0,0,24.9,0.744,57,Negative
2,108,62,10,278,25.3,0.881,22,Negative
0,146,70,0,0,37.9,0.334,28,Positive
10,129,76,28,122,35.9,0.28,39,Negative
7,133,88,15,155,32.4,0.262,37,Negative
7,161,86,0,0,30.4,0.165,47,Positive
2,108,80,0,0,27,0.259,52,Positive
7,136,74,26,135,26,0.647,51,Negative
5,155,84,44,545,38.7,0.619,34,Negative
1,119,86,39,220,45.6,0.808,29,Positive
4,96,56,17,49,20.8,0.34,26,Negative
5,108,72,43,75,36.1,0.263,33,Negative
0,78,88,29,40,36.9,0.434,21,Negative
0,107,62,30,74,36.6,0.757,25,Positive
2,128,78,37,182,43.3,1.224,31,Positive
1,128,48,45,194,40.5,0.613,24,Positive
0,161,50,0,0,21.9,0.254,65,Negative
6,151,62,31,120,35.5,0.692,28,Negative
2,146,70,38,360,28,0.337,29,Positive
0,126,84,29,215,30.7,0.52,24,Negative
14,100,78,25,184,36.6,0.412,46,Positive
8,112,72,0,0,23.6,0.84,58,Negative
0,167,0,0,0,32.3,0.839,30,Positive
2,144,58,33,135,31.6,0.422,25,Positive
5,77,82,41,42,35.8,0.156,35,Negative
5,115,98,0,0,52.9,0.209,28,Positive
3,150,76,0,0,21,0.207,37,Negative
2,120,76,37,105,39.7,0.215,29,Negative
10,161,68,23,132,25.5,0.326,47,Positive
0,137,68,14,148,24.8,0.143,21,Negative
0,128,68,19,180,30.5,1.391,25,Positive
2,124,68,28,205,32.9,0.875,30,Positive
6,80,66,30,0,26.2,0.313,41,Negative
0,106,70,37,148,39.4,0.605,22,Negative
2,155,74,17,96,26.6,0.433,27,Positive
3,113,50,10,85,29.5,0.626,25,Negative
7,109,80,31,0,35.9,1.127,43,Positive
2,112,68,22,94,34.1,0.315,26,Negative
3,99,80,11,64,19.3,0.284,30,Negative
3,182,74,0,0,30.5,0.345,29,Positive
3,115,66,39,140,38.1,0.15,28,Negative
6,194,78,0,0,23.5,0.129,59,Positive
4,129,60,12,231,27.5,0.527,31,Negative
3,112,74,30,0,31.6,0.197,25,Positive
0,124,70,20,0,27.4,0.254,36,Positive
13,152,90,33,29,26.8,0.731,43,Positive
2,112,75,32,0,35.7,0.148,21,Negative
1,157,72,21,168,25.6,0.123,24,Negative
1,122,64,32,156,35.1,0.692,30,Positive
10,179,70,0,0,35.1,0.2,37,Negative
2,102,86,36,120,45.5,0.127,23,Positive
6,105,70,32,68,30.8,0.122,37,Negative
8,118,72,19,0,23.1,1.476,46,Negative
2,87,58,16,52,32.7,0.166,25,Negative
1,180,0,0,0,43.3,0.282,41,Positive
12,106,80,0,0,23.6,0.137,44,Negative
1,95,60,18,58,23.9,0.26,22,Negative
0,165,76,43,255,47.9,0.259,26,Negative
0,117,0,0,0,33.8,0.932,44,Negative
5,115,76,0,0,31.2,0.343,44,Positive
9,152,78,34,171,34.2,0.893,33,Positive
7,178,84,0,0,39.9,0.331,41,Positive
1,130,70,13,105,25.9,0.472,22,Negative
1,95,74,21,73,25.9,0.673,36,Negative
1,0,68,35,0,32,0.389,22,Negative
5,122,86,0,0,34.7,0.29,33,Negative
8,95,72,0,0,36.8,0.485,57,Negative
8,126,88,36,108,38.5,0.349,49,Negative
1,139,46,19,83,28.7,0.654,22,Negative
3,116,0,0,0,23.5,0.187,23,Negative
3,99,62,19,74,21.8,0.279,26,Negative
5,0,80,32,0,41,0.346,37,Positive
4,92,80,0,0,42.2,0.237,29,Negative
4,137,84,0,0,31.2,0.252,30,Negative
3,61,82,28,0,34.4,0.243,46,Negative
1,90,62,12,43,27.2,0.58,24,Negative
3,90,78,0,0,42.7,0.559,21,Negative
9,165,88,0,0,30.4,0.302,49,Positive
1,125,50,40,167,33.3,0.962,28,Positive
13,129,0,30,0,39.9,0.569,44,Positive
12,88,74,40,54,35.3,0.378,48,Negative
1,196,76,36,249,36.5,0.875,29,Positive
5,189,64,33,325,31.2,0.583,29,Positive
5,158,70,0,0,29.8,0.207,63,Negative
5,103,108,37,0,39.2,0.305,65,Negative
4,146,78,0,0,38.5,0.52,67,Positive
4,147,74,25,293,34.9,0.385,30,Negative
5,99,54,28,83,34,0.499,30,Negative
6,124,72,0,0,27.6,0.368,29,Positive
0,101,64,17,0,21,0.252,21,Negative
3,81,86,16,66,27.5,0.306,22,Negative
1,133,102,28,140,32.8,0.234,45,Positive
3,173,82,48,465,38.4,2.137,25,Positive
0,118,64,23,89,0,1.731,21,Negative
0,84,64,22,66,35.8,0.545,21,Negative
2,105,58,40,94,34.9,0.225,25,Negative
2,122,52,43,158,36.2,0.816,28,Negative
12,140,82,43,325,39.2,0.528,58,Positive
0,98,82,15,84,25.2,0.299,22,Negative
1,87,60,37,75,37.2,0.509,22,Negative
4,156,75,0,0,48.3,0.238,32,Positive
0,93,100,39,72,43.4,1.021,35,Negative
1,107,72,30,82,30.8,0.821,24,Negative
0,105,68,22,0,20,0.236,22,Negative
1,109,60,8,182,25.4,0.947,21,Negative
1,90,62,18,59,25.1,1.268,25,Negative
1,125,70,24,110,24.3,0.221,25,Negative
1,119,54,13,50,22.3,0.205,24,Negative
5,116,74,29,0,32.3,0.66,35,Positive
8,105,100,36,0,43.3,0.239,45,Positive
5,144,82,26,285,32,0.452,58,Positive
3,100,68,23,81,31.6,0.949,28,Negative
1,100,66,29,196,32,0.444,42,Negative
5,166,76,0,0,45.7,0.34,27,Positive
1,131,64,14,415,23.7,0.389,21,Negative
4,116,72,12,87,22.1,0.463,37,Negative
4,158,78,0,0,32.9,0.803,31,Positive
2,127,58,24,275,27.7,1.6,25,Negative
3,96,56,34,115,24.7,0.944,39,Negative
0,131,66,40,0,34.3,0.196,22,Positive
3,82,70,0,0,21.1,0.389,25,Negative
3,193,70,31,0,34.9,0.241,25,Positive
4,95,64,0,0,32,0.161,31,Positive
6,137,61,0,0,24.2,0.151,55,Negative
5,136,84,41,88,35,0.286,35,Positive
9,72,78,25,0,31.6,0.28,38,Negative
5,168,64,0,0,32.9,0.135,41,Positive
2,123,48,32,165,42.1,0.52,26,Negative
4,115,72,0,0,28.9,0.376,46,Positive
0,101,62,0,0,21.9,0.336,25,Negative
8,197,74,0,0,25.9,1.191,39,Positive
1,172,68,49,579,42.4,0.702,28,Positive
6,102,90,39,0,35.7,0.674,28,Negative
1,112,72,30,176,34.4,0.528,25,Negative
1,143,84,23,310,42.4,1.076,22,Negative
1,143,74,22,61,26.2,0.256,21,Negative
0,138,60,35,167,34.6,0.534,21,Positive
3,173,84,33,474,35.7,0.258,22,Positive
1,97,68,21,0,27.2,1.095,22,Negative
4,144,82,32,0,38.5,0.554,37,Positive
1,83,68,0,0,18.2,0.624,27,Negative
3,129,64,29,115,26.4,0.219,28,Positive
1,119,88,41,170,45.3,0.507,26,Negative
2,94,68,18,76,26,0.561,21,Negative
0,102,64,46,78,40.6,0.496,21,Negative
2,115,64,22,0,30.8,0.421,21,Negative
8,151,78,32,210,42.9,0.516,36,Positive
4,184,78,39,277,37,0.264,31,Positive
0,94,0,0,0,0,0.256,25,Negative
1,181,64,30,180,34.1,0.328,38,Positive
0,135,94,46,145,40.6,0.284,26,Negative
1,95,82,25,180,35,0.233,43,Positive
2,99,0,0,0,22.2,0.108,23,Negative
3,89,74,16,85,30.4,0.551,38,Negative
1,80,74,11,60,30,0.527,22,Negative
2,139,75,0,0,25.6,0.167,29,Negative
1,90,68,8,0,24.5,1.138,36,Negative
0,141,0,0,0,42.4,0.205,29,Positive
12,140,85,33,0,37.4,0.244,41,Negative
5,147,75,0,0,29.9,0.434,28,Negative
1,97,70,15,0,18.2,0.147,21,Negative
6,107,88,0,0,36.8,0.727,31,Negative
0,189,104,25,0,34.3,0.435,41,Positive
2,83,66,23,50,32.2,0.497,22,Negative
4,117,64,27,120,33.2,0.23,24,Negative
8,108,70,0,0,30.5,0.955,33,Positive
4,117,62,12,0,29.7,0.38,30,Positive
0,180,78,63,14,59.4,2.42,25,Positive
1,100,72,12,70,25.3,0.658,28,Negative
0,95,80,45,92,36.5,0.33,26,Negative
0,104,64,37,64,33.6,0.51,22,Positive
0,120,74,18,63,30.5,0.285,26,Negative
1,82,64,13,95,21.2,0.415,23,Negative
2,134,70,0,0,28.9,0.542,23,Positive
0,91,68,32,210,39.9,0.381,25,Negative
2,119,0,0,0,19.6,0.832,72,Negative
2,100,54,28,105,37.8,0.498,24,Negative
14,175,62,30,0,33.6,0.212,38,Positive
1,135,54,0,0,26.7,0.687,62,Negative
5,86,68,28,71,30.2,0.364,24,Negative
10,148,84,48,237,37.6,1.001,51,Positive
9,134,74,33,60,25.9,0.46,81,Negative
9,120,72,22,56,20.8,0.733,48,Negative
1,71,62,0,0,21.8,0.416,26,Negative
8,74,70,40,49,35.3,0.705,39,Negative
5,88,78,30,0,27.6,0.258,37,Negative
10,115,98,0,0,24,1.022,34,Negative
0,124,56,13,105,21.8,0.452,21,Negative
0,74,52,10,36,27.8,0.269,22,Negative
0,97,64,36,100,36.8,0.6,25,Negative
8,120,0,0,0,30,0.183,38,Positive
6,154,78,41,140,46.1,0.571,27,Negative
1,144,82,40,0,41.3,0.607,28,Negative
0,137,70,38,0,33.2,0.17,22,Negative
0,119,66,27,0,38.8,0.259,22,Negative
7,136,90,0,0,29.9,0.21,50,Negative
4,114,64,0,0,28.9,0.126,24,Negative
0,137,84,27,0,27.3,0.231,59,Negative
2,105,80,45,191,33.7,0.711,29,Positive
7,114,76,17,110,23.8,0.466,31,Negative
8,126,74,38,75,25.9,0.162,39,Negative
4,132,86,31,0,28,0.419,63,Negative
3,158,70,30,328,35.5,0.344,35,Positive
0,123,88,37,0,35.2,0.197,29,Negative
4,85,58,22,49,27.8,0.306,28,Negative
0,84,82,31,125,38.2,0.233,23,Negative
0,145,0,0,0,44.2,0.63,31,Positive
0,135,68,42,250,42.3,0.365,24,Positive
1,139,62,41,480,40.7,0.536,21,Negative
0,173,78,32,265,46.5,1.159,58,Negative
4,99,72,17,0,25.6,0.294,28,Negative
8,194,80,0,0,26.1,0.551,67,Negative
2,83,65,28,66,36.8,0.629,24,Negative
2,89,90,30,0,33.5,0.292,42,Negative
4,99,68,38,0,32.8,0.145,33,Negative
4,125,70,18,122,28.9,1.144,45,Positive
3,80,0,0,0,0,0.174,22,Negative
6,166,74,0,0,26.6,0.304,66,Negative
5,110,68,0,0,26,0.292,30,Negative
2,81,72,15,76,30.1,0.547,25,Negative
7,195,70,33,145,25.1,0.163,55,Positive
6,154,74,32,193,29.3,0.839,39,Negative
2,117,90,19,71,25.2,0.313,21,Negative
3,84,72,32,0,37.2,0.267,28,Negative
6,0,68,41,0,39,0.727,41,Positive
7,94,64,25,79,33.3,0.738,41,Negative
3,96,78,39,0,37.3,0.238,40,Negative
10,75,82,0,0,33.3,0.263,38,Negative
0,180,90,26,90,36.5,0.314,35,Positive
1,130,60,23,170,28.6,0.692,21,Negative
2,84,50,23,76,30.4,0.968,21,Negative
8,120,78,0,0,25,0.409,64,Negative
12,84,72,31,0,29.7,0.297,46,Positive
0,139,62,17,210,22.1,0.207,21,Negative
9,91,68,0,0,24.2,0.2,58,Negative
2,91,62,0,0,27.3,0.525,22,Negative
3,99,54,19,86,25.6,0.154,24,Negative
3,163,70,18,105,31.6,0.268,28,Positive
9,145,88,34,165,30.3,0.771,53,Positive
7,125,86,0,0,37.6,0.304,51,Negative
13,76,60,0,0,32.8,0.18,41,Negative
6,129,90,7,326,19.6,0.582,60,Negative
2,68,70,32,66,25,0.187,25,Negative
3,124,80,33,130,33.2,0.305,26,Negative
6,114,0,0,0,0,0.189,26,Negative
9,130,70,0,0,34.2,0.652,45,Positive
3,125,58,0,0,31.6,0.151,24,Negative
3,87,60,18,0,21.8,0.444,21,Negative
1,97,64,19,82,18.2,0.299,21,Negative
3,116,74,15,105,26.3,0.107,24,Negative
0,117,66,31,188,30.8,0.493,22,Negative
0,111,65,0,0,24.6,0.66,31,Negative
2,122,60,18,106,29.8,0.717,22,Negative
0,107,76,0,0,45.3,0.686,24,Negative
1,86,66,52,65,41.3,0.917,29,Negative
6,91,0,0,0,29.8,0.501,31,Negative
1,77,56,30,56,33.3,1.251,24,Negative
4,132,0,0,0,32.9,0.302,23,Positive
0,105,90,0,0,29.6,0.197,46,Negative
0,57,60,0,0,21.7,0.735,67,Negative
0,127,80,37,210,36.3,0.804,23,Negative
3,129,92,49,155,36.4,0.968,32,Positive
8,100,74,40,215,39.4,0.661,43,Positive
3,128,72,25,190,32.4,0.549,27,Positive
10,90,85,32,0,34.9,0.825,56,Positive
4,84,90,23,56,39.5,0.159,25,Negative
1,88,78,29,76,32,0.365,29,Negative
8,186,90,35,225,34.5,0.423,37,Positive
5,187,76,27,207,43.6,1.034,53,Positive
4,131,68,21,166,33.1,0.16,28,Negative
1,164,82,43,67,32.8,0.341,50,Negative
4,189,110,31,0,28.5,0.68,37,Negative
1,116,70,28,0,27.4,0.204,21,Negative
3,84,68,30,106,31.9,0.591,25,Negative
6,114,88,0,0,27.8,0.247,66,Negative
1,88,62,24,44,29.9,0.422,23,Negative
1,84,64,23,115,36.9,0.471,28,Negative
7,124,70,33,215,25.5,0.161,37,Negative
1,97,70,40,0,38.1,0.218,30,Negative
8,110,76,0,0,27.8,0.237,58,Negative
11,103,68,40,0,46.2,0.126,42,Negative
11,85,74,0,0,30.1,0.3,35,Negative
6,125,76,0,0,33.8,0.121,54,Positive
0,198,66,32,274,41.3,0.502,28,Positive
1,87,68,34,77,37.6,0.401,24,Negative
6,99,60,19,54,26.9,0.497,32,Negative
0,91,80,0,0,32.4,0.601,27,Negative
2,95,54,14,88,26.1,0.748,22,Negative
1,99,72,30,18,38.6,0.412,21,Negative
6,92,62,32,126,32,0.085,46,Negative
4,154,72,29,126,31.3,0.338,37,Negative
0,121,66,30,165,34.3,0.203,33,Positive
3,78,70,0,0,32.5,0.27,39,Negative
2,130,96,0,0,22.6,0.268,21,Negative
3,111,58,31,44,29.5,0.43,22,Negative
2,98,60,17,120,34.7,0.198,22,Negative
1,143,86,30,330,30.1,0.892,23,Negative
1,119,44,47,63,35.5,0.28,25,Negative
6,108,44,20,130,24,0.813,35,Negative
2,118,80,0,0,42.9,0.693,21,Positive
10,133,68,0,0,27,0.245,36,Negative
2,197,70,99,0,34.7,0.575,62,Positive
0,151,90,46,0,42.1,0.371,21,Positive
6,109,60,27,0,25,0.206,27,Negative
12,121,78,17,0,26.5,0.259,62,Negative
8,100,76,0,0,38.7,0.19,42,Negative
8,124,76,24,600,28.7,0.687,52,Positive
1,93,56,11,0,22.5,0.417,22,Negative
8,143,66,0,0,34.9,0.129,41,Positive
6,103,66,0,0,24.3,0.249,29,Negative
3,176,86,27,156,33.3,1.154,52,Positive
0,73,0,0,0,21.1,0.342,25,Negative
11,111,84,40,0,46.8,0.925,45,Positive
2,112,78,50,140,39.4,0.175,24,Negative
3,132,80,0,0,34.4,0.402,44,Positive
2,82,52,22,115,28.5,1.699,25,Negative
6,123,72,45,230,33.6,0.733,34,Negative
0,188,82,14,185,32,0.682,22,Positive
0,67,76,0,0,45.3,0.194,46,Negative
1,89,24,19,25,27.8,0.559,21,Negative
1,173,74,0,0,36.8,0.088,38,Positive
1,109,38,18,120,23.1,0.407,26,Negative
1,108,88,19,0,27.1,0.4,24,Negative
6,96,0,0,0,23.7,0.19,28,Negative
1,124,74,36,0,27.8,0.1,30,Negative
7,150,78,29,126,35.2,0.692,54,Positive
4,183,0,0,0,28.4,0.212,36,Positive
1,124,60,32,0,35.8,0.514,21,Negative
1,181,78,42,293,40,1.258,22,Positive
1,92,62,25,41,19.5,0.482,25,Negative
0,152,82,39,272,41.5,0.27,27,Negative
1,111,62,13,182,24,0.138,23,Negative
3,106,54,21,158,30.9,0.292,24,Negative
3,174,58,22,194,32.9,0.593,36,Positive
7,168,88,42,321,38.2,0.787,40,Positive
6,105,80,28,0,32.5,0.878,26,Negative
11,138,74,26,144,36.1,0.557,50,Positive
3,106,72,0,0,25.8,0.207,27,Negative
6,117,96,0,0,28.7,0.157,30,Negative
2,68,62,13,15,20.1,0.257,23,Negative
9,112,82,24,0,28.2,1.282,50,Positive
0,119,0,0,0,32.4,0.141,24,Positive
2,112,86,42,160,38.4,0.246,28,Negative
2,92,76,20,0,24.2,1.698,28,Negative
6,183,94,0,0,40.8,1.461,45,Negative
0,94,70,27,115,43.5,0.347,21,Negative
2,108,64,0,0,30.8,0.158,21,Negative
4,90,88,47,54,37.7,0.362,29,Negative
0,125,68,0,0,24.7,0.206,21,Negative
0,132,78,0,0,32.4,0.393,21,Negative
5,128,80,0,0,34.6,0.144,45,Negative
4,94,65,22,0,24.7,0.148,21,Negative
7,114,64,0,0,27.4,0.732,34,Positive
0,102,78,40,90,34.5,0.238,24,Negative
2,111,60,0,0,26.2,0.343,23,Negative
1,128,82,17,183,27.5,0.115,22,Negative
10,92,62,0,0,25.9,0.167,31,Negative
13,104,72,0,0,31.2,0.465,38,Positive
5,104,74,0,0,28.8,0.153,48,Negative
2,94,76,18,66,31.6,0.649,23,Negative
7,97,76,32,91,40.9,0.871,32,Positive
1,100,74,12,46,19.5,0.149,28,Negative
0,102,86,17,105,29.3,0.695,27,Negative
4,128,70,0,0,34.3,0.303,24,Negative
6,147,80,0,0,29.5,0.178,50,Positive
4,90,0,0,0,28,0.61,31,Negative
3,103,72,30,152,27.6,0.73,27,Negative
2,157,74,35,440,39.4,0.134,30,Negative
1,167,74,17,144,23.4,0.447,33,Positive
0,179,50,36,159,37.8,0.455,22,Positive
11,136,84,35,130,28.3,0.26,42,Positive
0,107,60,25,0,26.4,0.133,23,Negative
1,91,54,25,100,25.2,0.234,23,Negative
1,117,60,23,106,33.8,0.466,27,Negative
5,123,74,40,77,34.1,0.269,28,Negative
2,120,54,0,0,26.8,0.455,27,Negative
1,106,70,28,135,34.2,0.142,22,Negative
2,155,52,27,540,38.7,0.24,25,Positive
2,101,58,35,90,21.8,0.155,22,Negative
1,120,80,48,200,38.9,1.162,41,Negative
11,127,106,0,0,39,0.19,51,Negative
3,80,82,31,70,34.2,1.292,27,Positive
10,162,84,0,0,27.7,0.182,54,Negative
1,199,76,43,0,42.9,1.394,22,Positive
8,167,106,46,231,37.6,0.165,43,Positive
9,145,80,46,130,37.9,0.637,40,Positive
6,115,60,39,0,33.7,0.245,40,Positive
1,112,80,45,132,34.8,0.217,24,Negative
4,145,82,18,0,32.5,0.235,70,Positive
10,111,70,27,0,27.5,0.141,40,Positive
6,98,58,33,190,34,0.43,43,Negative
9,154,78,30,100,30.9,0.164,45,Negative
6,165,68,26,168,33.6,0.631,49,Negative
1,99,58,10,0,25.4,0.551,21,Negative
10,68,106,23,49,35.5,0.285,47,Negative
3,123,100,35,240,57.3,0.88,22,Negative
8,91,82,0,0,35.6,0.587,68,Negative
6,195,70,0,0,30.9,0.328,31,Positive
9,156,86,0,0,24.8,0.23,53,Positive
0,93,60,0,0,35.3,0.263,25,Negative
3,121,52,0,0,36,0.127,25,Positive
2,101,58,17,265,24.2,0.614,23,Negative
2,56,56,28,45,24.2,0.332,22,Negative
0,162,76,36,0,49.6,0.364,26,Positive
0,95,64,39,105,44.6,0.366,22,Negative
4,125,80,0,0,32.3,0.536,27,Positive
5,136,82,0,0,0,0.64,69,Negative
2,129,74,26,205,33.2,0.591,25,Negative
3,130,64,0,0,23.1,0.314,22,Negative
1,107,50,19,0,28.3,0.181,29,Negative
1,140,74,26,180,24.1,0.828,23,Negative
1,144,82,46,180,46.1,0.335,46,Positive
8,107,80,0,0,24.6,0.856,34,Negative
13,158,114,0,0,42.3,0.257,44,Positive
2,121,70,32,95,39.1,0.886,23,Negative
7,129,68,49,125,38.5,0.439,43,Positive
2,90,60,0,0,23.5,0.191,25,Negative
7,142,90,24,480,30.4,0.128,43,Positive
3,169,74,19,125,29.9,0.268,31,Positive
0,99,0,0,0,25,0.253,22,Negative
4,127,88,11,155,34.5,0.598,28,Negative
4,118,70,0,0,44.5,0.904,26,Negative
2,122,76,27,200,35.9,0.483,26,Negative
6,125,78,31,0,27.6,0.565,49,Positive
1,168,88,29,0,35,0.905,52,Positive
2,129,0,0,0,38.5,0.304,41,Negative
4,110,76,20,100,28.4,0.118,27,Negative
6,80,80,36,0,39.8,0.177,28,Negative
10,115,0,0,0,0,0.261,30,Positive
2,127,46,21,335,34.4,0.176,22,Negative
9,164,78,0,0,32.8,0.148,45,Positive
2,93,64,32,160,38,0.674,23,Positive
3,158,64,13,387,31.2,0.295,24,Negative
5,126,78,27,22,29.6,0.439,40,Negative
10,129,62,36,0,41.2,0.441,38,Positive
0,134,58,20,291,26.4,0.352,21,Negative
3,102,74,0,0,29.5,0.121,32,Negative
7,187,50,33,392,33.9,0.826,34,Positive
3,173,78,39,185,33.8,0.97,31,Positive
10,94,72,18,0,23.1,0.595,56,Negative
1,108,60,46,178,35.5,0.415,24,Negative
5,97,76,27,0,35.6,0.378,52,Positive
4,83,86,19,0,29.3,0.317,34,Negative
1,114,66,36,200,38.1,0.289,21,Negative
1,149,68,29,127,29.3,0.349,42,Positive
5,117,86,30,105,39.1,0.251,42,Negative
1,111,94,0,0,32.8,0.265,45,Negative
4,112,78,40,0,39.4,0.236,38,Negative
1,116,78,29,180,36.1,0.496,25,Negative
0,141,84,26,0,32.4,0.433,22,Negative
2,175,88,0,0,22.9,0.326,22,Negative
2,92,52,0,0,30.1,0.141,22,Negative
3,130,78,23,79,28.4,0.323,34,Positive
8,120,86,0,0,28.4,0.259,22,Positive
2,174,88,37,120,44.5,0.646,24,Positive
2,106,56,27,165,29,0.426,22,Negative
2,105,75,0,0,23.3,0.56,53,Negative
4,95,60,32,0,35.4,0.284,28,Negative
0,126,86,27,120,27.4,0.515,21,Negative
8,65,72,23,0,32,0.6,42,Negative
2,99,60,17,160,36.6,0.453,21,Negative
1,102,74,0,0,39.5,0.293,42,Positive
11,120,80,37,150,42.3,0.785,48,Positive
3,102,44,20,94,30.8,0.4,26,Negative
1,109,58,18,116,28.5,0.219,22,Negative
9,140,94,0,0,32.7,0.734,45,Positive
13,153,88,37,140,40.6,1.174,39,Negative
12,100,84,33,105,30,0.488,46,Negative
1,147,94,41,0,49.3,0.358,27,Positive
1,81,74,41,57,46.3,1.096,32,Negative
3,187,70,22,200,36.4,0.408,36,Positive
6,162,62,0,0,24.3,0.178,50,Positive
4,136,70,0,0,31.2,1.182,22,Positive
1,121,78,39,74,39,0.261,28,Negative
3,108,62,24,0,26,0.223,25,Negative
0,181,88,44,510,43.3,0.222,26,Positive
8,154,78,32,0,32.4,0.443,45,Positive
1,128,88,39,110,36.5,1.057,37,Positive
7,137,90,41,0,32,0.391,39,Negative
0,123,72,0,0,36.3,0.258,52,Positive
1,106,76,0,0,37.5,0.197,26,Negative
6,190,92,0,0,35.5,0.278,66,Positive
2,88,58,26,16,28.4,0.766,22,Negative
9,170,74,31,0,44,0.403,43,Positive
9,89,62,0,0,22.5,0.142,33,Negative
10,101,76,48,180,32.9,0.171,63,Negative
2,122,70,27,0,36.8,0.34,27,Negative
5,121,72,23,112,26.2,0.245,30,Negative
1,126,60,0,0,30.1,0.349,47,Positive
1,93,70,31,0,30.4,0.315,23,Negative

File diff suppressed because it is too large

@ -11,27 +11,28 @@
"start": "npm run dev"
},
"dependencies": {
"@babel/core": "^7.8.3",
"@babel/runtime": "^7.8.3",
"@fortawesome/fontawesome-free": "^5.12.0",
"@fortawesome/fontawesome-svg-core": "^1.2.26",
"@fortawesome/free-solid-svg-icons": "^5.12.0",
"@babel/core": "^7.8.4",
"@babel/runtime": "^7.8.4",
"@fortawesome/fontawesome-free": "^5.12.1",
"@fortawesome/fontawesome-svg-core": "^1.2.27",
"@fortawesome/free-solid-svg-icons": "^5.12.1",
"@fortawesome/vue-fontawesome": "^0.1.9",
"@statnett/vue-plotly": "^0.3.2",
"@types/d3-drag": "^1.2.3",
"@types/node": "^13.5.1",
"ajv": "^6.11.0",
"@types/node": "^13.7.4",
"ajv": "^6.12.0",
"audit": "0.0.6",
"axios": "^0.19.2",
"axios-progress-bar": "^1.2.0",
"babel-preset-vue": "^2.0.2",
"blob": "0.0.5",
"blob": "0.1.0",
"bootstrap": "^4.4.1",
"bootstrap-toggle": "^2.2.2",
"bootstrap-vue": "^2.3.0",
"bootstrap-vue": "^2.5.0",
"circlepack-chart": "^1.3.0",
"clean-webpack-plugin": "^3.0.0",
"colorbrewer": "^1.3.0",
"cryo": "0.0.6",
"d3": "^5.15.0",
"d3-array": "^2.4.0",
"d3-brush": "^1.1.5",
@ -50,15 +51,15 @@
"fs": "0.0.2",
"fs-es6": "0.0.2",
"ify-loader": "^1.1.0",
"interactjs": "^1.8.2",
"interactjs": "^1.8.4",
"jquery": "^3.4.1",
"mdbvue": "^6.3.0",
"mdbvue": "^6.5.0",
"merge": "^1.2.1",
"mini-css-extract-plugin": "^0.9.0",
"npm-check-updates": "^4.0.1",
"papaparse": "^5.1.1",
"parcoord-es": "^2.2.10",
"plotly.js": "^1.52.1",
"plotly.js": "^1.52.2",
"popper.js": "^1.16.1",
"react": "^16.12.0",
"react-dom": "^16.12.0",
@ -75,9 +76,9 @@
"vue-papa-parse": "^1.3.0",
"vue-plotly": "^1.1.0",
"vue-router": "^3.1.5",
"vue-slider-component": "^3.1.0",
"vue-slider-component": "^3.1.1",
"vue2-simplert-plugin": "^0.5.3",
"webpack-cli": "^3.3.10",
"webpack-cli": "^3.3.11",
"webpack-require": "0.0.16"
},
"devDependencies": {
@ -92,7 +93,7 @@
"@babel/plugin-syntax-import-meta": "^7.8.3",
"@babel/plugin-syntax-jsx": "^7.8.3",
"@babel/plugin-transform-runtime": "^7.8.3",
"@babel/preset-env": "^7.8.3",
"@babel/preset-env": "^7.8.4",
"autoprefixer": "^9.7.4",
"babel-eslint": "^10.0.3",
"babel-helper-vue-jsx-merge-props": "^2.0.3",
@ -105,13 +106,13 @@
"eslint-config-standard": "^14.1.0",
"eslint-friendly-formatter": "^4.0.1",
"eslint-loader": "^3.0.3",
"eslint-plugin-import": "^2.20.0",
"eslint-plugin-import": "^2.20.1",
"eslint-plugin-node": "^11.0.0",
"eslint-plugin-promise": "^4.2.1",
"eslint-plugin-standard": "^4.0.1",
"eslint-plugin-vue": "^6.1.2",
"eslint-plugin-vue": "^6.2.1",
"extract-text-webpack-plugin": "^3.0.2",
"file-loader": "^5.0.2",
"file-loader": "^5.1.0",
"friendly-errors-webpack-plugin": "^1.7.0",
"html-webpack-plugin": "^3.2.0",
"node-notifier": "^6.0.0",
@ -121,24 +122,24 @@
"postcss-import": "^12.0.1",
"postcss-loader": "^3.0.0",
"postcss-url": "^8.0.0",
"rimraf": "^3.0.1",
"rimraf": "^3.0.2",
"sass": "^1.25.0",
"sass-loader": "^8.0.2",
"semver": "^7.1.1",
"semver": "^7.1.3",
"shelljs": "^0.8.3",
"uglifyjs-webpack-plugin": "^2.2.0",
"url-loader": "^3.0.0",
"vue-class-component": "^7.2.2",
"vue-cli-plugin-vuetify": "^2.0.3",
"vue-loader": "^15.8.3",
"vue-property-decorator": "^8.3.0",
"vue-class-component": "^7.2.3",
"vue-cli-plugin-vuetify": "^2.0.5",
"vue-loader": "^15.9.0",
"vue-property-decorator": "^8.4.0",
"vue-style-loader": "^4.1.2",
"vue-template-compiler": "^2.6.11",
"vue2-simplert": "^1.0.0",
"vuetify-loader": "^1.4.3",
"webpack": "^4.41.5",
"webpack": "^4.41.6",
"webpack-bundle-analyzer": "^3.6.0",
"webpack-dev-server": "^3.10.1",
"webpack-dev-server": "^3.10.3",
"webpack-merge": "^4.2.2"
},
"browserslist": [

@ -65,13 +65,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgKNN['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgKNN['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgKNN['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgKNN['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgKNN['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgKNN['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgKNN['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgKNN['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgKNN['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgKNN['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgKNN['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgKNN['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgKNN['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgKNN['log_loss'])[j]))
if (sumKNN <= 0) {
sumKNN = 0
}
McKNN.push((sumKNN/divide)*100)
}
var McSVC = []
const performanceAlgSVC = JSON.parse(this.ModelsPerformance[14])
const performanceAlgSVC = JSON.parse(this.ModelsPerformance[15])
for (let j = 0; j < Object.values(performanceAlgSVC['mean_test_accuracy']).length; j++) {
let sumSVC
sumSVC = (factorsLocal[0] * Object.values(performanceAlgSVC['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgSVC['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgSVC['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgSVC['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgSVC['geometric_mean_score_macro'])[j])
@ -79,13 +76,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgSVC['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgSVC['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgSVC['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgSVC['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgSVC['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgSVC['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgSVC['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgSVC['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgSVC['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgSVC['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgSVC['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgSVC['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgSVC['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgSVC['log_loss'])[j]))
if (sumSVC <= 0) {
sumSVC = 0
}
McSVC.push((sumSVC/divide)*100)
}
var McGausNB = []
const performanceAlgGausNB = JSON.parse(this.ModelsPerformance[22])
const performanceAlgGausNB = JSON.parse(this.ModelsPerformance[24])
for (let j = 0; j < Object.values(performanceAlgGausNB['mean_test_accuracy']).length; j++) {
let sumGausNB
sumGausNB = (factorsLocal[0] * Object.values(performanceAlgGausNB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgGausNB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgGausNB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgGausNB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgGausNB['geometric_mean_score_macro'])[j])
@ -93,13 +87,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgGausNB['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgGausNB['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgGausNB['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgGausNB['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgGausNB['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgGausNB['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgGausNB['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgGausNB['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgGausNB['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgGausNB['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgGausNB['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgGausNB['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgGausNB['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgGausNB['log_loss'])[j]))
if (sumGausNB <= 0) {
sumGausNB = 0
}
McGausNB.push((sumGausNB/divide)*100)
}
var McMLP = []
const performanceAlgMLP = JSON.parse(this.ModelsPerformance[30])
const performanceAlgMLP = JSON.parse(this.ModelsPerformance[33])
for (let j = 0; j < Object.values(performanceAlgMLP['mean_test_accuracy']).length; j++) {
let sumMLP
sumMLP = (factorsLocal[0] * Object.values(performanceAlgMLP['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgMLP['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgMLP['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgMLP['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgMLP['geometric_mean_score_macro'])[j])
@ -107,13 +98,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgMLP['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgMLP['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgMLP['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgMLP['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgMLP['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgMLP['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgMLP['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgMLP['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgMLP['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgMLP['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgMLP['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgMLP['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgMLP['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgMLP['log_loss'])[j]))
if (sumMLP <= 0) {
sumMLP = 0
}
McMLP.push((sumMLP/divide)*100)
}
var McLR = []
const performanceAlgLR = JSON.parse(this.ModelsPerformance[38])
const performanceAlgLR = JSON.parse(this.ModelsPerformance[42])
for (let j = 0; j < Object.values(performanceAlgLR['mean_test_accuracy']).length; j++) {
let sumLR
sumLR = (factorsLocal[0] * Object.values(performanceAlgLR['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgLR['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgLR['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgLR['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgLR['geometric_mean_score_macro'])[j])
@ -121,13 +109,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgLR['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgLR['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgLR['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgLR['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgLR['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgLR['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgLR['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgLR['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgLR['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgLR['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgLR['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgLR['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgLR['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgLR['log_loss'])[j]))
if (sumLR <= 0) {
sumLR = 0
}
McLR.push((sumLR/divide)*100)
}
var McLDA = []
const performanceAlgLDA = JSON.parse(this.ModelsPerformance[46])
const performanceAlgLDA = JSON.parse(this.ModelsPerformance[51])
for (let j = 0; j < Object.values(performanceAlgLDA['mean_test_accuracy']).length; j++) {
let sumLDA
sumLDA = (factorsLocal[0] * Object.values(performanceAlgLDA['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgLDA['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgLDA['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgLDA['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgLDA['geometric_mean_score_macro'])[j])
@ -135,13 +120,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgLDA['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgLDA['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgLDA['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgLDA['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgLDA['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgLDA['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgLDA['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgLDA['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgLDA['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgLDA['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgLDA['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgLDA['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgLDA['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgLDA['log_loss'])[j]))
if (sumLDA <= 0) {
sumLDA = 0
}
McLDA.push((sumLDA/divide)*100)
}
var McQDA = []
const performanceAlgQDA = JSON.parse(this.ModelsPerformance[54])
const performanceAlgQDA = JSON.parse(this.ModelsPerformance[60])
for (let j = 0; j < Object.values(performanceAlgQDA['mean_test_accuracy']).length; j++) {
let sumQDA
sumQDA = (factorsLocal[0] * Object.values(performanceAlgQDA['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgQDA['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgQDA['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgQDA['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgQDA['geometric_mean_score_macro'])[j])
@ -149,13 +131,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgQDA['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgQDA['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgQDA['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgQDA['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgQDA['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgQDA['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgQDA['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgQDA['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgQDA['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgQDA['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgQDA['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgQDA['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgQDA['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgQDA['log_loss'])[j]))
if (sumQDA <= 0) {
sumQDA = 0
}
McQDA.push((sumQDA/divide)*100)
}
var McRF = []
const performanceAlgRF = JSON.parse(this.ModelsPerformance[62])
const performanceAlgRF = JSON.parse(this.ModelsPerformance[69])
for (let j = 0; j < Object.values(performanceAlgRF['mean_test_accuracy']).length; j++) {
let sumRF
sumRF = (factorsLocal[0] * Object.values(performanceAlgRF['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgRF['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgRF['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgRF['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgRF['geometric_mean_score_macro'])[j])
@ -163,13 +142,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgRF['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgRF['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgRF['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgRF['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgRF['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgRF['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgRF['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgRF['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgRF['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgRF['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgRF['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgRF['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgRF['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgRF['log_loss'])[j]))
if (sumRF <= 0) {
sumRF = 0
}
McRF.push((sumRF/divide)*100)
}
var McExtraT = []
const performanceAlgExtraT = JSON.parse(this.ModelsPerformance[70])
const performanceAlgExtraT = JSON.parse(this.ModelsPerformance[78])
for (let j = 0; j < Object.values(performanceAlgExtraT['mean_test_accuracy']).length; j++) {
let sumExtraT
sumExtraT = (factorsLocal[0] * Object.values(performanceAlgExtraT['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgExtraT['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgExtraT['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgExtraT['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgExtraT['geometric_mean_score_macro'])[j])
@ -177,13 +153,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgExtraT['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgExtraT['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgExtraT['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgExtraT['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgExtraT['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgExtraT['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgExtraT['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgExtraT['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgExtraT['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgExtraT['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgExtraT['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgExtraT['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgExtraT['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgExtraT['log_loss'])[j]))
if (sumExtraT <= 0) {
sumExtraT = 0
}
McExtraT.push((sumExtraT/divide)*100)
}
var McAdaB = []
const performanceAlgAdaB = JSON.parse(this.ModelsPerformance[78])
const performanceAlgAdaB = JSON.parse(this.ModelsPerformance[87])
for (let j = 0; j < Object.values(performanceAlgAdaB['mean_test_accuracy']).length; j++) {
let sumAdaB
sumAdaB = (factorsLocal[0] * Object.values(performanceAlgAdaB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgAdaB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgAdaB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgAdaB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgAdaB['geometric_mean_score_macro'])[j])
@ -191,13 +164,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgAdaB['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgAdaB['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgAdaB['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgAdaB['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgAdaB['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgAdaB['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgAdaB['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgAdaB['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgAdaB['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgAdaB['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgAdaB['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgAdaB['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgAdaB['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgAdaB['log_loss'])[j]))
if (sumAdaB <= 0) {
sumAdaB = 0
}
McAdaB.push((sumAdaB/divide)*100)
}
var McGradB = []
const performanceAlgGradB = JSON.parse(this.ModelsPerformance[86])
const performanceAlgGradB = JSON.parse(this.ModelsPerformance[96])
for (let j = 0; j < Object.values(performanceAlgGradB['mean_test_accuracy']).length; j++) {
let sumGradB
sumGradB = (factorsLocal[0] * Object.values(performanceAlgGradB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgGradB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgGradB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgGradB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgGradB['geometric_mean_score_macro'])[j])
@ -205,9 +175,6 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgGradB['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgGradB['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgGradB['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgGradB['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgGradB['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgGradB['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgGradB['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgGradB['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgGradB['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgGradB['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgGradB['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgGradB['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgGradB['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgGradB['log_loss'])[j]))
if (sumGradB <= 0) {
sumGradB = 0
}
McGradB.push((sumGradB/divide)*100)
}
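Each of the hunks above repeats the same weighted composite: every entry of factorsLocal scales one of the 24 cross-validation metrics, the negative-error metrics are offset into a non-negative range, and the total is normalised by divide and expressed as a percentage; the client-side clamp to zero is dropped here, presumably because the metrics are now clipped at zero on the server (metrics.clip(lower=0) in the hunk further up). A compact sketch of that calculation, with an assumed subset of the weights and made-up metric values for a single grid point:

# Illustration only: the per-model composite each Vue hunk computes, written as
# a small Python function. Weights and values below are assumptions, not data
# from the repository.
def composite_score(metrics, weights, divide):
    """Return the weighted metrics composite as a percentage."""
    total = 0.0
    for name, weight in weights.items():
        value = metrics[name]
        if name.startswith("mean_test_neg_"):
            value = value + 1.0          # shift negative-error metrics into [0, 1]
        total += weight * value
    total = max(total, 0.0)              # clamp, as the removed client-side lines did
    return (total / divide) * 100.0


weights = {                              # assumed subset of this.factors
    "mean_test_accuracy": 1,
    "mean_test_neg_mean_absolute_error": 1,
    "f1_macro": 1,
    "matthews_corrcoef": 1,
}
metrics_for_one_model = {                # example values for a single grid point
    "mean_test_accuracy": 0.95,
    "mean_test_neg_mean_absolute_error": -0.08,
    "f1_macro": 0.94,
    "matthews_corrcoef": 0.91,
}
divide = sum(weights.values())
print(round(composite_score(metrics_for_one_model, weights, divide), 2))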
@ -216,34 +183,34 @@ export default {
Combined = JSON.parse(this.ModelsPerformance[1])
colorGiv = colors[0]
} else if (this.selAlgorithm == 'SVC') {
Combined = JSON.parse(this.ModelsPerformance[9])
Combined = JSON.parse(this.ModelsPerformance[10])
colorGiv = colors[1]
} else if (this.selAlgorithm == 'GausNB') {
Combined = JSON.parse(this.ModelsPerformance[17])
Combined = JSON.parse(this.ModelsPerformance[19])
colorGiv = colors[2]
} else if (this.selAlgorithm == 'MLP') {
Combined = JSON.parse(this.ModelsPerformance[25])
Combined = JSON.parse(this.ModelsPerformance[28])
colorGiv = colors[3]
} else if (this.selAlgorithm == 'LR') {
Combined = JSON.parse(this.ModelsPerformance[33])
Combined = JSON.parse(this.ModelsPerformance[37])
colorGiv = colors[4]
} else if (this.selAlgorithm == 'LDA') {
Combined = JSON.parse(this.ModelsPerformance[41])
Combined = JSON.parse(this.ModelsPerformance[46])
colorGiv = colors[5]
} else if (this.selAlgorithm == 'QDA') {
Combined = JSON.parse(this.ModelsPerformance[49])
Combined = JSON.parse(this.ModelsPerformance[55])
colorGiv = colors[6]
} else if (this.selAlgorithm == 'RF') {
Combined = JSON.parse(this.ModelsPerformance[57])
Combined = JSON.parse(this.ModelsPerformance[64])
colorGiv = colors[7]
} else if (this.selAlgorithm == 'ExtraT') {
Combined = JSON.parse(this.ModelsPerformance[65])
Combined = JSON.parse(this.ModelsPerformance[73])
colorGiv = colors[8]
} else if (this.selAlgorithm == 'AdaB') {
Combined = JSON.parse(this.ModelsPerformance[73])
Combined = JSON.parse(this.ModelsPerformance[82])
colorGiv = colors[9]
} else {
Combined = JSON.parse(this.ModelsPerformance[81])
Combined = JSON.parse(this.ModelsPerformance[91])
colorGiv = colors[10]
}
var valuesPerf = Object.values(Combined['params'])

@ -39,6 +39,7 @@ export default {
parameters: [],
algorithm1: [],
algorithm2: [],
activeTabVal: true,
factors: [1,1,1,0,0
,1,0,0,1,0
,0,1,0,0,0
@ -60,16 +61,16 @@ export default {
// retrieve models ID
const AlgorKNNIDs = this.PerformanceAllModels[0]
const AlgorSVCIDs = this.PerformanceAllModels[8]
const AlgorGausNBIDs = this.PerformanceAllModels[16]
const AlgorMLPIDs = this.PerformanceAllModels[24]
const AlgorLRIDs = this.PerformanceAllModels[32]
const AlgorLDAIDs = this.PerformanceAllModels[40]
const AlgorQDAIDs = this.PerformanceAllModels[48]
const AlgorRFIDs = this.PerformanceAllModels[56]
const AlgorExtraTIDs = this.PerformanceAllModels[64]
const AlgorAdaBIDs = this.PerformanceAllModels[72]
const AlgorGradBIDs = this.PerformanceAllModels[80]
const AlgorSVCIDs = this.PerformanceAllModels[9]
const AlgorGausNBIDs = this.PerformanceAllModels[18]
const AlgorMLPIDs = this.PerformanceAllModels[27]
const AlgorLRIDs = this.PerformanceAllModels[36]
const AlgorLDAIDs = this.PerformanceAllModels[45]
const AlgorQDAIDs = this.PerformanceAllModels[54]
const AlgorRFIDs = this.PerformanceAllModels[63]
const AlgorExtraTIDs = this.PerformanceAllModels[72]
const AlgorAdaBIDs = this.PerformanceAllModels[81]
const AlgorGradBIDs = this.PerformanceAllModels[90]
var factorsLocal = this.factors
var divide = 0
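The ID lookups above shift from a stride of 8 to a stride of 9 because GridSearchForModels now appends the per-instance influence data (json.dumps(impDataInst), position 8) to each algorithm's results block, so every later block starts one slot further along. A short sketch makes the new layout explicit (an illustration, not code from the repository):

# Illustration only: the flattened results list holds a fixed-size block per
# algorithm; adding impDataInst grew that block from 8 to 9 entries.
ALGORITHMS = ["KNN", "SVC", "GausNB", "MLP", "LR", "LDA",
              "QDA", "RF", "ExtraT", "AdaB", "GradB"]
ENTRIES_PER_ALGORITHM = 9          # was 8 before this commit


def block_start(algorithm):
    """Index of an algorithm's first entry (its model IDs) in PerformanceAllModels."""
    return ALGORITHMS.index(algorithm) * ENTRIES_PER_ALGORITHM


def entry_index(algorithm, offset):
    """Index of one entry inside an algorithm's block (offset 1 = params, 6 = metrics)."""
    return block_start(algorithm) + offset


# e.g. block_start("MLP") == 27, block_start("GradB") == 90,
# entry_index("SVC", 1) == 10 and entry_index("SVC", 6) == 15 -- matching the
# updated constants in these hunks.
print({alg: block_start(alg) for alg in ALGORITHMS})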
@ -87,13 +88,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgKNN['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgKNN['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgKNN['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgKNN['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgKNN['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgKNN['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgKNN['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgKNN['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgKNN['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgKNN['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgKNN['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgKNN['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgKNN['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgKNN['log_loss'])[j]))
if (sumKNN <= 0) {
sumKNN = 0
}
McKNN.push((sumKNN/divide)*100)
}
var McSVC = []
const performanceAlgSVC = JSON.parse(this.PerformanceAllModels[14])
const performanceAlgSVC = JSON.parse(this.PerformanceAllModels[15])
for (let j = 0; j < Object.values(performanceAlgSVC['mean_test_accuracy']).length; j++) {
let sumSVC
sumSVC = (factorsLocal[0] * Object.values(performanceAlgSVC['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgSVC['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgSVC['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgSVC['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgSVC['geometric_mean_score_macro'])[j])
@ -101,13 +99,11 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgSVC['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgSVC['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgSVC['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgSVC['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgSVC['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgSVC['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgSVC['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgSVC['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgSVC['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgSVC['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgSVC['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgSVC['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgSVC['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgSVC['log_loss'])[j]))
if (sumSVC <= 0) {
sumSVC = 0
}
McSVC.push((sumSVC/divide)*100)
}
var McGausNB = []
const performanceAlgGausNB = JSON.parse(this.PerformanceAllModels[22])
const performanceAlgGausNB = JSON.parse(this.PerformanceAllModels[24])
for (let j = 0; j < Object.values(performanceAlgGausNB['mean_test_accuracy']).length; j++) {
let sumGausNB
sumGausNB = (factorsLocal[0] * Object.values(performanceAlgGausNB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgGausNB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgGausNB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgGausNB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgGausNB['geometric_mean_score_macro'])[j])
@ -115,13 +111,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgGausNB['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgGausNB['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgGausNB['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgGausNB['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgGausNB['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgGausNB['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgGausNB['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgGausNB['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgGausNB['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgGausNB['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgGausNB['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgGausNB['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgGausNB['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgGausNB['log_loss'])[j]))
if (sumGausNB <= 0) {
sumGausNB = 0
}
McGausNB.push((sumGausNB/divide)*100)
}
var McMLP = []
const performanceAlgMLP = JSON.parse(this.PerformanceAllModels[30])
const performanceAlgMLP = JSON.parse(this.PerformanceAllModels[33])
for (let j = 0; j < Object.values(performanceAlgMLP['mean_test_accuracy']).length; j++) {
let sumMLP
sumMLP = (factorsLocal[0] * Object.values(performanceAlgMLP['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgMLP['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgMLP['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgMLP['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgMLP['geometric_mean_score_macro'])[j])
@ -129,13 +122,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgMLP['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgMLP['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgMLP['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgMLP['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgMLP['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgMLP['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgMLP['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgMLP['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgMLP['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgMLP['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgMLP['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgMLP['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgMLP['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgMLP['log_loss'])[j]))
if (sumMLP <= 0) {
sumMLP = 0
}
McMLP.push((sumMLP/divide)*100)
}
var McLR = []
const performanceAlgLR = JSON.parse(this.PerformanceAllModels[38])
const performanceAlgLR = JSON.parse(this.PerformanceAllModels[42])
for (let j = 0; j < Object.values(performanceAlgLR['mean_test_accuracy']).length; j++) {
let sumLR
sumLR = (factorsLocal[0] * Object.values(performanceAlgLR['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgLR['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgLR['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgLR['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgLR['geometric_mean_score_macro'])[j])
@ -143,13 +133,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgLR['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgLR['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgLR['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgLR['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgLR['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgLR['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgLR['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgLR['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgLR['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgLR['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgLR['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgLR['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgLR['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgLR['log_loss'])[j]))
if (sumLR <= 0) {
sumLR = 0
}
McLR.push((sumLR/divide)*100)
}
var McLDA = []
const performanceAlgLDA = JSON.parse(this.PerformanceAllModels[46])
const performanceAlgLDA = JSON.parse(this.PerformanceAllModels[51])
for (let j = 0; j < Object.values(performanceAlgLDA['mean_test_accuracy']).length; j++) {
let sumLDA
sumLDA = (factorsLocal[0] * Object.values(performanceAlgLDA['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgLDA['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgLDA['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgLDA['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgLDA['geometric_mean_score_macro'])[j])
@ -157,13 +144,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgLDA['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgLDA['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgLDA['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgLDA['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgLDA['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgLDA['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgLDA['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgLDA['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgLDA['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgLDA['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgLDA['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgLDA['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgLDA['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgLDA['log_loss'])[j]))
if (sumLDA <= 0) {
sumLDA = 0
}
McLDA.push((sumLDA/divide)*100)
}
var McQDA = []
const performanceAlgQDA = JSON.parse(this.PerformanceAllModels[54])
const performanceAlgQDA = JSON.parse(this.PerformanceAllModels[60])
for (let j = 0; j < Object.values(performanceAlgQDA['mean_test_accuracy']).length; j++) {
let sumQDA
sumQDA = (factorsLocal[0] * Object.values(performanceAlgQDA['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgQDA['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgQDA['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgQDA['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgQDA['geometric_mean_score_macro'])[j])
@ -171,13 +155,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgQDA['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgQDA['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgQDA['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgQDA['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgQDA['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgQDA['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgQDA['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgQDA['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgQDA['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgQDA['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgQDA['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgQDA['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgQDA['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgQDA['log_loss'])[j]))
if (sumQDA <= 0) {
sumQDA = 0
}
McQDA.push((sumQDA/divide)*100)
}
var McRF = []
const performanceAlgRF = JSON.parse(this.PerformanceAllModels[62])
const performanceAlgRF = JSON.parse(this.PerformanceAllModels[69])
for (let j = 0; j < Object.values(performanceAlgRF['mean_test_accuracy']).length; j++) {
let sumRF
sumRF = (factorsLocal[0] * Object.values(performanceAlgRF['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgRF['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgRF['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgRF['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgRF['geometric_mean_score_macro'])[j])
@ -185,13 +166,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgRF['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgRF['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgRF['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgRF['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgRF['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgRF['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgRF['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgRF['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgRF['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgRF['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgRF['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgRF['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgRF['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgRF['log_loss'])[j]))
if (sumRF <= 0) {
sumRF = 0
}
McRF.push((sumRF/divide)*100)
}
var McExtraT = []
const performanceAlgExtraT = JSON.parse(this.PerformanceAllModels[70])
const performanceAlgExtraT = JSON.parse(this.PerformanceAllModels[78])
for (let j = 0; j < Object.values(performanceAlgExtraT['mean_test_accuracy']).length; j++) {
let sumExtraT
sumExtraT = (factorsLocal[0] * Object.values(performanceAlgExtraT['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgExtraT['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgExtraT['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgExtraT['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgExtraT['geometric_mean_score_macro'])[j])
@ -199,13 +177,10 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgExtraT['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgExtraT['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgExtraT['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgExtraT['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgExtraT['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgExtraT['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgExtraT['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgExtraT['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgExtraT['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgExtraT['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgExtraT['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgExtraT['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgExtraT['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgExtraT['log_loss'])[j]))
if (sumExtraT <= 0) {
sumExtraT = 0
}
McExtraT.push((sumExtraT/divide)*100)
}
var McAdaB = []
const performanceAlgAdaB = JSON.parse(this.PerformanceAllModels[78])
const performanceAlgAdaB = JSON.parse(this.PerformanceAllModels[87])
for (let j = 0; j < Object.values(performanceAlgAdaB['mean_test_accuracy']).length; j++) {
let sumAdaB
sumAdaB = (factorsLocal[0] * Object.values(performanceAlgAdaB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgAdaB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgAdaB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgAdaB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgAdaB['geometric_mean_score_macro'])[j])
@ -219,7 +194,7 @@ export default {
McAdaB.push((sumAdaB/divide)*100)
}
var McGradB = []
const performanceAlgGradB = JSON.parse(this.PerformanceAllModels[86])
const performanceAlgGradB = JSON.parse(this.PerformanceAllModels[96])
for (let j = 0; j < Object.values(performanceAlgGradB['mean_test_accuracy']).length; j++) {
let sumGradB
sumGradB = (factorsLocal[0] * Object.values(performanceAlgGradB['mean_test_accuracy'])[j]) + (factorsLocal[1] * (Object.values(performanceAlgGradB['mean_test_neg_mean_absolute_error'])[j]) + 1) + (factorsLocal[2] * (Object.values(performanceAlgGradB['mean_test_neg_root_mean_squared_error'])[j]) + 1) + (factorsLocal[3] * Object.values(performanceAlgGradB['geometric_mean_score_micro'])[j]) + (factorsLocal[4] * Object.values(performanceAlgGradB['geometric_mean_score_macro'])[j])
@ -227,24 +202,21 @@ export default {
+ (factorsLocal[10] * Object.values(performanceAlgGradB['mean_test_recall_macro'])[j]) + (factorsLocal[11] * Object.values(performanceAlgGradB['mean_test_recall_weighted'])[j]) + (factorsLocal[12] * Object.values(performanceAlgGradB['f5_micro'])[j]) + (factorsLocal[13] * Object.values(performanceAlgGradB['f5_macro'])[j]) + (factorsLocal[14] * Object.values(performanceAlgGradB['f5_weighted'])[j]) + (factorsLocal[15] * Object.values(performanceAlgGradB['f1_micro'])[j])
+ (factorsLocal[16] * Object.values(performanceAlgGradB['f1_macro'])[j]) + (factorsLocal[17] * Object.values(performanceAlgGradB['f1_weighted'])[j]) + (factorsLocal[18] * Object.values(performanceAlgGradB['f2_micro'])[j]) + (factorsLocal[19] * Object.values(performanceAlgGradB['f2_macro'])[j]) + (factorsLocal[20] * Object.values(performanceAlgGradB['f2_weighted'])[j]) + (factorsLocal[21] * Object.values(performanceAlgGradB['matthews_corrcoef'])[j])
+ (factorsLocal[22] * Object.values(performanceAlgGradB['mean_test_roc_auc_ovo_weighted'])[j]) + (factorsLocal[23] * (1 - Object.values(performanceAlgGradB['log_loss'])[j]))
if (sumGradB <= 0) {
sumGradB = 0
}
McGradB.push((sumGradB/divide)*100)
}
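// --- Illustrative sketch (editor addition, not code from this commit) -------
// Every per-algorithm loop above repeats the same weighted-score pattern:
// negated error metrics are shifted by +1, log loss is inverted, all other
// metrics are taken as-is, and the weighted sum is clamped at zero, divided by
// the number of active weights ("divide"), and expressed as a percentage.
// A helper capturing that pattern could look roughly like the function below;
// the function name and the metricKeys parameter are assumptions, not
// identifiers from this repository.
function weightedPerformanceScore (performance, factors, metricKeys, divide, j) {
  let sum = 0
  metricKeys.forEach((key, k) => {
    let value = Object.values(performance[key])[j]
    if (key.indexOf('neg_') !== -1) value = value + 1 // negated errors are <= 0
    if (key === 'log_loss') value = 1 - value // lower loss should score higher
    sum += factors[k] * value
  })
  if (sum <= 0) sum = 0 // clamp, exactly as the loops above do
  return (sum / divide) * 100
}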
// retrieve the results, such as the performance metrics, for every algorithm
const PerformAlgorKNN = JSON.parse(this.PerformanceAllModels[1])
const PerformAlgorSVC = JSON.parse(this.PerformanceAllModels[9])
const PerformAlgorGausNB = JSON.parse(this.PerformanceAllModels[17])
const PerformAlgorMLP = JSON.parse(this.PerformanceAllModels[25])
const PerformAlgorLR = JSON.parse(this.PerformanceAllModels[33])
const PerformAlgorLDA = JSON.parse(this.PerformanceAllModels[41])
const PerformAlgorQDA = JSON.parse(this.PerformanceAllModels[49])
const PerformAlgorRF = JSON.parse(this.PerformanceAllModels[57])
const PerformAlgorExtraT = JSON.parse(this.PerformanceAllModels[65])
const PerformAlgorAdaB = JSON.parse(this.PerformanceAllModels[73])
const PerformAlgorGradB = JSON.parse(this.PerformanceAllModels[81])
const PerformAlgorSVC = JSON.parse(this.PerformanceAllModels[10])
const PerformAlgorGausNB = JSON.parse(this.PerformanceAllModels[19])
const PerformAlgorMLP = JSON.parse(this.PerformanceAllModels[28])
const PerformAlgorLR = JSON.parse(this.PerformanceAllModels[37])
const PerformAlgorLDA = JSON.parse(this.PerformanceAllModels[46])
const PerformAlgorQDA = JSON.parse(this.PerformanceAllModels[55])
const PerformAlgorRF = JSON.parse(this.PerformanceAllModels[64])
const PerformAlgorExtraT = JSON.parse(this.PerformanceAllModels[73])
const PerformAlgorAdaB = JSON.parse(this.PerformanceAllModels[82])
const PerformAlgorGradB = JSON.parse(this.PerformanceAllModels[91])
// initialize/instantiate algorithms and parameters
this.algorithmKNN = []
@ -265,7 +237,7 @@ export default {
this.parameters.push(JSON.stringify(Object.values(PerformAlgorKNN['params'])[j]))
}
for (let j = 0; j < Object.keys(PerformAlgorSVC['params']).length; j++) {
this.algorithmSVC.push({'# Performance (%) #': McSVC[j],Algorithm:'C-Support Vector Classification',Model:'Model ' + AlgorSVCIDs[j] + '; Parameters '+JSON.stringify(Object.values(PerformAlgorSVC['params'])[j])+'; # Performance (%) # ',ModelID:AlgorSVCIDs[j]})
this.algorithmSVC.push({'# Performance (%) #': McSVC[j],Algorithm:'C-Support Vector Classif',Model:'Model ' + AlgorSVCIDs[j] + '; Parameters '+JSON.stringify(Object.values(PerformAlgorSVC['params'])[j])+'; # Performance (%) # ',ModelID:AlgorSVCIDs[j]})
this.parameters.push(JSON.stringify(Object.values(PerformAlgorSVC['params'])[j]))
}
for (let j = 0; j < Object.keys(PerformAlgorGausNB['params']).length; j++) {
@ -310,7 +282,7 @@ export default {
this.parameters.push(JSON.stringify(Object.values(PerformAlgorKNN['params'])[j]))
}
for (let j = 0; j < Object.keys(PerformAlgorSVC['params']).length; j++) {
this.algorithmSVC.push({'# Performance (%) #': this.listClassPerf[1][j],Algorithm:'C-Support Vector Classification',Model:'Model ' + AlgorSVCIDs[j] + '; Parameters '+JSON.stringify(Object.values(PerformAlgorSVC['params'])[j])+'; # Performance (%) # ',ModelID:AlgorSVCIDs[j]})
this.algorithmSVC.push({'# Performance (%) #': this.listClassPerf[1][j],Algorithm:'C-Support Vector Classif',Model:'Model ' + AlgorSVCIDs[j] + '; Parameters '+JSON.stringify(Object.values(PerformAlgorSVC['params'])[j])+'; # Performance (%) # ',ModelID:AlgorSVCIDs[j]})
this.parameters.push(JSON.stringify(Object.values(PerformAlgorSVC['params'])[j]))
}
for (let j = 0; j < Object.keys(PerformAlgorGausNB['params']).length; j++) {
@ -413,7 +385,7 @@ export default {
EventBus.$emit('updateBarChart', [])
}
el[1].onclick = function() {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classification')
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classif')
for (let i = 0; i < allPoints.length; i++) {
allPoints[i].style.fill = previousColor[1]
allPoints[i].style.opacity = '1.0'
@ -609,10 +581,10 @@ export default {
EventBus.$emit('alternateFlagLock')
EventBus.$emit('updateBarChart', [])
}
// check if brushed through all boxplots and not only one at a time
const myObserver = new ResizeObserver(entries => {
EventBus.$emit('brusheAllOn')
if (this.activeTabVal) {
(EventBus.$emit('brusheAllOn'))
}
})
var brushRect = document.querySelector('.extent')
myObserver.observe(brushRect);
@ -630,7 +602,7 @@ export default {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point K-Nearest Neighbors')
algorithm = this.algorithmKNN
} else if (this.AllAlgorithms[j] === 'SVC') {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classification')
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classif')
algorithm = this.algorithmSVC
} else if (this.AllAlgorithms[j] === 'GausNB') {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point Gaussian Naive Bayes')
@ -805,7 +777,7 @@ export default {
if (this.selectedAlgorithm === 'KNN') {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point K-Nearest Neighbors')
} else if (this.selectedAlgorithm === 'SVC') {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classification')
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point C-Support Vector Classif')
} else if (this.selectedAlgorithm === 'GausNB') {
var allPoints = document.getElementsByClassName('d3-exploding-boxplot point Gaussian Naive Bayes')
} else if (this.selectedAlgorithm === 'MLP') {
@ -975,7 +947,7 @@ export default {
activeModels.push(allPoints[i].__data__.Model)
if (allPoints[i].__data__.Algorithm === 'K-Nearest Neighbors') {
algorithmsSelected.push('KNN')
} else if (allPoints[i].__data__.Algorithm === 'C-Support Vector Classification') {
} else if (allPoints[i].__data__.Algorithm === 'C-Support Vector Classif') {
algorithmsSelected.push('SVC')
} else if (allPoints[i].__data__.Algorithm === 'Gaussian Naive Bayes') {
algorithmsSelected.push('GausNB')
@ -1019,7 +991,7 @@ export default {
activeModels.push(allPoints[i].__data__.Model)
if (allPoints[i].__data__.Algorithm === 'K-Nearest Neighbors') {
algorithmsSelected.push('KNN')
} else if (allPoints[i].__data__.Algorithm === 'C-Support Vector Classification') {
} else if (allPoints[i].__data__.Algorithm === 'C-Support Vector Classif') {
algorithmsSelected.push('SVC')
} else if (allPoints[i].__data__.Algorithm === 'Gaussian Naive Bayes') {
algorithmsSelected.push('GausNB')
@ -1086,6 +1058,8 @@ export default {
},
},
mounted () {
EventBus.$on('Algorithm', data => { this.activeTabVal = data })
EventBus.$on('emittedEventCallingModelBrushed', this.selectedPointsPerAlgorithm)
EventBus.$on('emittedEventCallingAllAlgorithms', data => {
this.PerformanceAllModels = data})

@ -46,7 +46,7 @@
performancePerModel.forEach(element => {
let el = {}
el.type = "variable 1"
el.value = element * 100
el.value = element
data.push(el)
})
@ -54,14 +54,14 @@
performancePerModel.forEach(element => {
let el = {}
el.type = "variable 2"
el.value = element * 100
el.value = element
data.push(el)
})
} else {
performancePerModelSelection.forEach(element => {
let el = {}
el.type = "variable 2"
el.value = element * 100
el.value = element
data.push(el)
})
}
@ -92,7 +92,7 @@
var histogram = d3.histogram()
.value(function(d) { return +d.value; }) // I need to give the vector of values
.domain(x.domain()) // then the domain of the graphic
.thresholds(x.ticks(40)); // then the number of bins
.thresholds(x.ticks(10)); // then the number of bins
// And apply twice this function to data to get the bins.
var bins1 = histogram(data.filter( function(d){return d.type === "variable 1"} ));
@ -104,7 +104,7 @@
.domain([0, d3.max(bins1, function(d) { return d.length; })]); // d3.hist has to be called before the Y axis obviously
svg.append("g")
.attr("transform", "translate(-20,0)")
.call(d3.axisLeft(y1));
.call(d3.axisLeft(y1).ticks(5).tickSizeOuter(0));
// Y axis: scale and draw:
var y2 = d3.scaleLinear()
@ -112,7 +112,7 @@
.domain([0, d3.max(bins2, function(d) { return d.length; })]); // d3.hist has to be called before the Y axis obviously
svg.append("g")
.attr("transform", "translate(-20,0)")
.call(d3.axisLeft(y2));
.call(d3.axisLeft(y2).ticks(5).tickSizeOuter(0));
// Add a tooltip div. Here I define the general features of the tooltip: the parts that do not depend on the data point.
// Its opacity is set to 0: we don't see it by default.

@ -25,6 +25,7 @@ export default {
,0,1,1,1
],
SVCModels: 576,
tNameAll: '',
GausNBModels: 736,
MLPModels: 1236,
LRModels: 1356,
@ -41,16 +42,16 @@ export default {
methods: {
BarChartView () {
const PerClassMetricsKNN = JSON.parse(this.PerformanceResults[2])
const PerClassMetricsSVC = JSON.parse(this.PerformanceResults[10])
const PerClassMetricsGausNB = JSON.parse(this.PerformanceResults[18])
const PerClassMetricsMLP = JSON.parse(this.PerformanceResults[26])
const PerClassMetricsLR = JSON.parse(this.PerformanceResults[34])
const PerClassMetricsLDA = JSON.parse(this.PerformanceResults[42])
const PerClassMetricsQDA = JSON.parse(this.PerformanceResults[50])
const PerClassMetricsRF = JSON.parse(this.PerformanceResults[58])
const PerClassMetricsExtraT = JSON.parse(this.PerformanceResults[66])
const PerClassMetricsAdaB = JSON.parse(this.PerformanceResults[74])
const PerClassMetricsGradB = JSON.parse(this.PerformanceResults[82])
const PerClassMetricsSVC = JSON.parse(this.PerformanceResults[11])
const PerClassMetricsGausNB = JSON.parse(this.PerformanceResults[20])
const PerClassMetricsMLP = JSON.parse(this.PerformanceResults[29])
const PerClassMetricsLR = JSON.parse(this.PerformanceResults[38])
const PerClassMetricsLDA = JSON.parse(this.PerformanceResults[47])
const PerClassMetricsQDA = JSON.parse(this.PerformanceResults[56])
const PerClassMetricsRF = JSON.parse(this.PerformanceResults[65])
const PerClassMetricsExtraT = JSON.parse(this.PerformanceResults[74])
const PerClassMetricsAdaB = JSON.parse(this.PerformanceResults[83])
const PerClassMetricsGradB = JSON.parse(this.PerformanceResults[92])
var KNNModels = []
var SVCModels = []
@ -461,8 +462,36 @@ export default {
}
for (var i = 0; i < target_names.length; i++) {
traces[i] = {
x: ['K-Nearest Neighbors','C-Support Vector Classifier','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
if (this.tNameAll == target_names[i]) {
traces[i] = {
x: ['K-Nearest Neighbors','C-Support Vector Classif','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
y: sumList[i],
name: '<b>'+target_names[i]+'</b>',
opacity: 0.5,
marker: {
opacity: 0.5,
color: this.colorsValues[i]
},
type: 'bar'
};
tracesSel[i] = {
type: 'bar',
x: ['K-Nearest Neighbors','C-Support Vector Classif','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
y: sumLineList[i],
name: '<b>'+target_names[i]+' (Sel)</b>',
xaxis: 'x2',
mode: 'markers',
marker: {
opacity: 1.0,
color: this.colorsValues[i],
},
width: [0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06]
};
data.push(traces[i])
data.push(tracesSel[i])
} else {
traces[i] = {
x: ['K-Nearest Neighbors','C-Support Vector Classif','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
y: sumList[i],
name: target_names[i],
opacity: 0.5,
@ -474,7 +503,7 @@ export default {
};
tracesSel[i] = {
type: 'bar',
x: ['K-Nearest Neighbors','C-Support Vector Classifier','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
x: ['K-Nearest Neighbors','C-Support Vector Classif','Gaussian Naive Bayes','Multilayer Perceptron','Logistic Regression','Linear Discrim Analysis','Quadratic Discrim Analysis','Random Forest','Extra Trees','AdaBoost','Gradient Boosting'],
y: sumLineList[i],
name: target_names[i]+' (Sel)',
xaxis: 'x2',
@ -488,15 +517,19 @@ export default {
data.push(traces[i])
data.push(tracesSel[i])
}
}
var barc = document.getElementById('barChart');
var config = {scrollZoom: true, displaylogo: false, showLink: false, showSendToCloud: false, modeBarButtonsToRemove: ['toImage'], responsive: true}
Plotly.newPlot(barc, data, layout)
Plotly.newPlot(barc, data, layout, config)
barc.on('plotly_click', (eventData) => {
var tName
eventData.points.forEach((e) => {
tName = e.data.name.replace(/ *\([^)]*\) */g, "")
});
this.tNameAll = tName
EventBus.$emit('clearPCP')
EventBus.$emit('alternateFlagLock')
EventBus.$emit('boxplotSet', [storeKNN[tName],storeSVC[tName],storeGausNB[tName],storeMLP[tName],storeLR[tName],storeLDA[tName],storeQDA[tName],storeRF[tName],storeExtraT[tName],storeAdaB[tName],storeGradB[tName]])
@ -511,6 +544,8 @@ export default {
}
},
mounted() {
EventBus.$on('EraseSelectionBarChart', data => { this.tNameAll = data })
EventBus.$on('updateBarChartAlgorithm', data => { this.algorithmsinBar = data })
EventBus.$on('updateBarChart', data => { this.modelsSelectedinBar = data })
EventBus.$on('updateBarChart', this.BarChartView)

@ -1,15 +1,17 @@
<template>
<div>
<div align="center">
Projection Selection: <select id="selectBarChartData" @change="selectVisualRepresentationData()">
<option value="mds" selected>MDS Projection</option>
<option value="tsne">t-SNE Projection</option>
<option value="umap">UMAP Projection</option>
Projection Method: <select id="selectBarChartData" @change="selectVisualRepresentationData()">
<option value="mds" selected>MDS</option>
<option value="tsne">t-SNE</option>
<option value="umap">UMAP</option>
</select>
&nbsp;&nbsp;
Filter: <select id="selectFilterID" @change="selectAppliedFilter()">
<option value="mean" selected>Mean</option>
<option value="median">Median</option>
</select>
&nbsp;&nbsp;
Action: <button
id="mergeID"
v-on:click="merge">
@ -28,6 +30,7 @@
<font-awesome-icon icon="eraser" />
{{ removeData }}
</button>
&nbsp;&nbsp;
History Manager: <button
id="saveID"
v-on:click="save">
@ -62,7 +65,6 @@ export default {
composeData: 'Compose',
saveData: 'Save Step',
restoreData: 'Restore Step',
instanceImpSize: '',
userSelectedFilter: 'mean',
responsiveWidthHeight: [],
colorsValues: ['#808000','#008080','#bebada','#fccde5','#d9d9d9','#bc80bd','#ccebc5'],
@ -97,6 +99,15 @@ export default {
restore () {
EventBus.$emit('SendProvenance', 'restore')
},
clean(obj) {
var propNames = Object.getOwnPropertyNames(obj);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (obj[propName] === null || obj[propName] === undefined) {
delete obj[propName];
}
}
},
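// Example (editor addition): clean({ age: 50, glucose: null, bmi: undefined })
// mutates the object in place and leaves only { age: 50 }; the values are
// hypothetical and only illustrate how empty fields are dropped before the
// rows are turned into hover strings below.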
scatterPlotDataView () {
Plotly.purge('OverviewDataPlotly')
@ -110,8 +121,30 @@ export default {
const DataSetY = JSON.parse(this.dataPoints[3])
const originalDataLabels = JSON.parse(this.dataPoints[4])
var DataSetParse = JSON.parse(DataSet)
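// Build one hover string per data point: null/undefined fields are stripped via
// clean() and commas are replaced with <br> so the details wrap line by line
// inside the Plotly tooltip.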
var stringParameters = []
for (let i = 0; i < DataSetParse.length; i++) {
this.clean(DataSetParse[i])
stringParameters.push(JSON.stringify(DataSetParse[i]).replace(/,/gi, '<br>'))
}
const XandYCoordinatesTSNE = JSON.parse(this.dataPoints[5])
const XandYCoordinatesUMAP = JSON.parse(this.dataPoints[6])
const impSizeArray = JSON.parse(this.dataPoints[7])
const KNNSize = JSON.parse(impSizeArray[8])
const SVCSize = JSON.parse(impSizeArray[17])
const GausNBSize = JSON.parse(impSizeArray[26])
const MLPSize = JSON.parse(impSizeArray[35])
const LRSize = JSON.parse(impSizeArray[44])
const LDASize = JSON.parse(impSizeArray[53])
const QDASize = JSON.parse(impSizeArray[62])
const RFSize = JSON.parse(impSizeArray[71])
const ExtraTSize = JSON.parse(impSizeArray[80])
const AdaBSize = JSON.parse(impSizeArray[89])
const GradBSize = JSON.parse(impSizeArray[98])
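// Marker size per data point: average the per-instance importance reported by
// each of the 11 algorithms and scale the result (factor 12, presumably chosen
// to keep the markers legible in the projection).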
var sizeScatterplot = []
for (let i = 0; i < KNNSize.length; i++) {
sizeScatterplot.push(((KNNSize[i] + SVCSize[i] + GausNBSize[i] + MLPSize[i] + LRSize[i] + LDASize[i] + QDASize[i] + RFSize[i] + ExtraTSize[i] + AdaBSize[i] + GradBSize[i]) / 11) * 12)
}
let intData = []
if (this.highlightedPoints.length > 0){
@ -126,14 +159,7 @@ export default {
var Xaxs = []
var Yaxs = []
var Opacity
var impSizeArray
if (this.instanceImpSize.length != 0) {
impSizeArray = JSON.parse(this.instanceImpSize)
}
console.log(impSizeArray)
if (this.representationDef == 'mds') {
for (let i = 0; i < XandYCoordinatesMDS[0].length; i++) {
Xaxs.push(XandYCoordinatesMDS[0][i])
@ -154,7 +180,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -176,7 +202,7 @@ export default {
y: aux_Y,
mode: 'markers',
name: target_names[i],
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: impSizeArray },
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: sizeScatterplot },
hovertemplate:
"<b>%{text}</b><br><br>" +
"<extra></extra>",
@ -185,7 +211,7 @@ export default {
}
layout = {
title: 'Data Space Projection (MDS)',
title: 'MDS Projection',
xaxis: {
visible: false
},
@ -197,6 +223,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else if (this.representationDef == 'tsne') {
result = XandYCoordinatesTSNE.reduce(function(r, a) {
@ -227,7 +260,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -248,7 +281,7 @@ export default {
y: aux_Y,
mode: 'markers',
name: target_names[i],
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: impSizeArray },
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: sizeScatterplot },
hovertemplate:
"<b>%{text}</b><br><br>" +
"<extra></extra>",
@ -257,7 +290,7 @@ export default {
}
layout = {
title: 'Data Space Projection (t-SNE)',
title: 't-SNE Projection',
xaxis: {
visible: false
},
@ -269,6 +302,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else {
for (let i = 0; i < XandYCoordinatesUMAP[0].length; i++) {
@ -289,7 +329,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -310,7 +350,7 @@ export default {
y: aux_Y,
mode: 'markers',
name: target_names[i],
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: impSizeArray },
marker: { color: this.colorsValues[i], line: { color: 'rgb(0, 0, 0)', width: 2 }, opacity: Opacity, size: sizeScatterplot },
hovertemplate:
"<b>%{text}</b><br><br>" +
"<extra></extra>",
@ -319,7 +359,7 @@ export default {
}
layout = {
title: 'Data Space Projection (UMAP)',
title: 'UMAP Projection',
xaxis: {
visible: false
},
@ -331,6 +371,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
}
@ -368,8 +415,6 @@ export default {
}
},
mounted() {
EventBus.$on('emittedEventCallingDataSpaceImportance', data => { this.instanceImpSize = data })
// initialize the first data space projection based on the data set
EventBus.$on('emittedEventCallingDataSpacePlotView', data => {
this.dataPoints = data})

@ -1,18 +1,58 @@
<template>
<div id="ExportResults">Results go here</div>
<div id="ExportResults">
Data Instances: {{ DataPickled }}
<br>
=======================================================
<br>
Data Features Per Model: {{ FeaturesPickled }}
<br>
=======================================================
<br>
Model IDs and Parameters: {{ ModelsPickled }}
</div>
</template>
<script>
import { EventBus } from '../main.js'
import * as Cryo from 'cryo'
export default {
name: 'Export',
data () {
return {
DataPickled: '',
FeaturesPickled: '',
ModelsPickled: '',
stackData: [],
stackFeatures: [],
stackModels: [],
}
},
methods: {
Pickle () {
this.DataPickled = Cryo.stringify(this.stackData)
this.FeaturesPickled = Cryo.stringify(this.stackFeatures)
this.ModelsPickled = Cryo.stringify(this.stackModels)
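// Illustrative note (editor addition): cryo can serialize structures that plain
// JSON cannot (e.g. undefined values and nested references), and the strings
// rendered above could presumably be restored elsewhere with Cryo.parse, e.g.
// const restoredModels = Cryo.parse(this.ModelsPickled)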
}
},
mounted () {
EventBus.$on('sendDatatoPickle', data => {
this.stackData = data})
EventBus.$on('sendDatatoPickle', this.Pickle)
EventBus.$on('sendSelectedFeaturestoPickle', data => {
this.stackFeatures = data})
EventBus.$on('sendSelectedFeaturestoPickle', this.Pickle)
EventBus.$on('ExtractResults', data => {
this.stackModels = data})
EventBus.$on('ExtractResults', this.Pickle)
}
}
</script>
</script>
<style scoped>
#ExportResults {
word-break: break-all !important;
}
</style>

@ -123,7 +123,7 @@ export default {
else if (this.Toggles[0] == 0 && this.Toggles[1] == 0 && this.Toggles[2] == 1) {
values[j] = FeaturesAccuracy[j][i]*100
} else {
alert('Please keep at least one toggle active! The states of the toggles are being reset.') // Fix this!
alert('Please keep at least one toggle active! The states of the toggles are being reset.')
this.Toggles[0] = 1
this.Toggles[1] = 1
this.Toggles[2] = 1
@ -382,6 +382,7 @@ export default {
}
finalresults.push(results)
}
EventBus.$emit('sendSelectedFeaturestoPickle', finalresults)
EventBus.$emit('SendSelectedFeaturesEvent', finalresults)
});
var svgLeg = d3.select("#LegendHeat").append("svg")

@ -143,7 +143,7 @@
<div id="myModal" class="w3-modal" style="position: fixed;">
<div class="w3-modal-content w3-card-4 w3-animate-zoom">
<header class="w3-container w3-blue">
<h3 style="display:inline-block; font-size: 16px; margin-top: 15px; margin-bottom:15px">Serialized Ensemble Learning Models Using Pickling</h3>
<h3 style="display:inline-block; font-size: 16px; margin-top: 15px; margin-bottom:15px">Serialized Data and Stacking Ensemble Learning Models Using Cryo</h3>
</header>
<Export/>
<div class="w3-container w3-light-grey w3-padding">
@ -323,6 +323,7 @@ export default Vue.extend({
if (this.firstTimeFlag == 1) {
this.selectedModels_Stack.push(0)
this.selectedModels_Stack.push(JSON.stringify(this.modelsUpdate))
EventBus.$emit('ParametersProvenance', this.OverviewResults)
EventBus.$emit('InitializeProvenance', this.selectedModels_Stack)
}
this.firstTimeFlag = 0
@ -424,7 +425,7 @@ export default Vue.extend({
},
RemoveFromStackModels () {
const path = `http://127.0.0.1:5000/data/ServerRemoveFromStack`
const postData = {
ClassifiersList: this.ClassifierIDsList
}
@ -555,8 +556,6 @@ export default Vue.extend({
axios.get(path, axiosConfig)
.then(response => {
this.FinalResults = response.data.FinalResults
this.DataSpaceImportance()
EventBus.$emit('emittedEventCallingLinePlot', this.FinalResults)
})
.catch(error => {
@ -611,27 +610,6 @@ export default Vue.extend({
console.log(error)
})
},
DataSpaceImportance () {
const path = `http://localhost:5000/data/SendInstancesImportance`
const axiosConfig = {
headers: {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Headers': 'Origin, Content-Type, X-Auth-Token',
'Access-Control-Allow-Methods': 'GET, PUT, POST, DELETE, OPTIONS'
}
}
axios.get(path, axiosConfig)
.then(response => {
this.instancesImportance = response.data.instancesImportance
EventBus.$emit('emittedEventCallingDataSpaceImportance', this.instancesImportance)
this.DataSpaceCall()
})
.catch(error => {
console.log(error)
})
},
DataSpaceCallAfterDataManipulation () {
const path = `http://localhost:5000/data/requestDataSpaceResultsAfterDataManipulation`
@ -645,7 +623,8 @@ export default Vue.extend({
}
axios.get(path, axiosConfig)
.then(response => {
this.DataSpaceImportance()
console.log('Calling Data Space!')
this.DataSpaceCall()
})
.catch(error => {
console.log(error)
@ -932,6 +911,23 @@ export default Vue.extend({
console.log(error)
})
},
Alg () {
$('#profile-tab').on('click', function (e) {
EventBus.$emit('Algorithm', false)
})
$('#contact-tab').on('click', function (e) {
EventBus.$emit('Algorithm', false)
})
$('#home-tab').on('click', function (e) {
var delayInMilliseconds = 1000; //1 second
setTimeout(function() {
EventBus.$emit('Algorithm', true)
}, delayInMilliseconds);
})
},
},
created () {
// does the browser support the Navigation Timing API?
@ -946,6 +942,7 @@ export default Vue.extend({
window.addEventListener('resize', this.change)
},
mounted() {
this.Alg()
var modal = document.getElementById('myModal')
window.onclick = function(event) {
//alert(event.target)

@ -1,5 +1,5 @@
<template>
<div id="PCPDataView" class="parcoords" style="width: 1200px; height:280px"></div>
<div id="PCPDataView" class="parcoords"></div>
</template>
<script>
@ -17,7 +17,7 @@ export default {
data () {
return {
PCPDataReceived: '',
colorsValues: ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99']
colorsValues: ['#808000','#008080','#bebada','#fccde5','#d9d9d9','#bc80bd','#ccebc5']
}
},
methods: {
@ -29,10 +29,19 @@ export default {
const DataSetNew = JSON.parse(this.PCPDataReceived[2])
var DataSetParse = JSON.parse(DataSetNew)
const target_names = JSON.parse(this.PCPDataReceived[3])
var colors = this.colorsValues
const target_names_original = JSON.parse(this.PCPDataReceived[4])
this.pc = ParCoords()("#PCPDataView")
var extraction = []
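// Attach the ground-truth label to every row; since Object.assign mutates the
// row in place, the label also shows up as an extra "Outcome" dimension in the
// parallel coordinates plot and travels with the data emitted for pickling.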
for (let i = 0; i < DataSetParse.length; i++) {
extraction.push(Object.assign(DataSetParse[i], {Outcome: target_names_original[i]}))
}
var colors = this.colorsValues
EventBus.$emit('sendDatatoPickle', extraction)
var pc = ParCoords()("#PCPDataView")
.data(DataSetParse)
.width(1200)
.height(280)
.color(function(d, i) { return colors[target_names[i]] })
.bundlingStrength(0) // set bundling strength
.smoothness(0)

@ -73,7 +73,7 @@ export default {
labelFactor: 1.25, //How much farther than the radius of the outer circle should the labels be placed
wrapWidth: 60, //The number of pixels after which a label needs to be given a new line
opacityArea: 0.35, //The opacity of the area of the blob
dotRadius: 4, //The size of the colored circles of each blob
dotRadius: 2, //The size of the colored circles of each blob
opacityCircles: 0.1, //The opacity of the circles of each blob
strokeWidth: 2, //The width of the stroke around each blob
roundStrokes: false, //If true the area and stroke will follow a round path (cardinal-closed)
@ -387,7 +387,6 @@ export default {
// Clear Heatmap first
var svg = d3.select("#overview");
svg.selectAll("*").remove();
var widthinter = this.WH[0]*2 // interactive visualization
var heightinter = this.WH[1]*1.23 // interactive visualization
@ -496,7 +495,7 @@ export default {
} else if (this.storeActiveModels[0] > this.AdaBModels) {
this.allActiveAdaB = countAdaBRelated.slice()
} else if (this.storeActiveModels[0] > this.ExtraTModels) {
this.allActiveExtraT = countExtraT.slice()
this.allActiveExtraT = countExtraTRelated.slice()
} else if (this.storeActiveModels[0] > this.RFModels) {
this.allActiveRF = countRFRelated.slice()
} else if (this.storeActiveModels[0] > this.QDAModels) {

@ -12,53 +12,86 @@ export default {
return {
barchartmetrics: '',
WH: [],
SelBarChartMetrics: []
SelBarChartMetrics: [],
factors: [1,1,1,0,0
,1,0,0,1,0
,0,1,0,0,0
,0,0,1,0,0
,0,1,1,1
],
}
},
methods: {
LineBar () {
Plotly.purge('PerMetricBar')
var x = []
var metricsPerModel = JSON.parse(this.barchartmetrics[9])
var metricsPerModelSel = []
if (this.SelBarChartMetrics.length == 0) {
metricsPerModelSel = metricsPerModel
} else {
metricsPerModelSel = this.SelBarChartMetrics
}
console.log(metricsPerModel)
console.log(metricsPerModelSel)
var factorsLocal = this.factors
var perModelAllClear = []
var perModelSelectedClear = []
var resultsColors = []
var chooseFrom = ['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']
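// Flatten the metrics into parallel x/y arrays: only metrics whose user weight
// (factor) is non-zero are kept, and the repeated labels in chooseFrom group
// the micro/macro/weighted variants under a single box per metric.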
for (let i = 0; i < metricsPerModel.length; i++) {
if (factorsLocal[i] != 0) {
resultsColors.push(metricsPerModel[i])
}
var temp = metricsPerModel[i]
var resultsClear = JSON.parse(temp)
var tempSel = metricsPerModelSel[i]
var resultsClearSelected = JSON.parse(tempSel)
for (let j = 0; j < Object.values(resultsClear).length; j++) {
if (factorsLocal[i] != 0) {
perModelAllClear.push(Object.values(resultsClear)[j])
perModelSelectedClear.push(Object.values(resultsClearSelected)[j])
x.push(chooseFrom[i])
}
}
}
var width = this.WH[0]*6.5 // interactive visualization
var height = this.WH[1]*0.5 // interactive visualization
var trace1 = {
x: ['Accuracy','MAE','RMSE','G-Mean','Precision','Recall','F-Beta Sc','MCC','ROC AUC','Log Loss'],
y: metricsPerModel,
name: 'Projection average',
type: 'bar',
x: x,
y: perModelAllClear,
name: 'Performance Metrics',
type: 'box',
boxmean: true,
marker: {
color: 'rgb(0,0,0)'
}
};
var trace2 = {
x: ['Accuracy','MAE','RMSE','G-Mean','Precision','Recall','F-Beta Sc','MCC','ROC AUC','Log Loss'],
y: metricsPerModelSel,
x: x,
y: perModelSelectedClear,
name: 'Selected points',
type: 'bar',
type: 'box',
boxmean: true,
marker: {
color: 'rgb(211,211,211)'
}
};
var data = [trace1, trace2];
var layout = {
barmode: 'group',autosize: false,
boxmode: 'group',
autosize: true,
width: width,
height: height,
hovermode: 'x',
margin: {
l: 50,
r: 30,
b: 35,
t: 5,
pad: 4
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
xaxis: {
title: 'Performance Metrics',
@ -67,13 +100,61 @@ export default {
color: 'black'
}},
yaxis: {
title: 'Performance',
title: '# Performance (%) #',
titlefont: {
size: 12,
color: 'black'
}}};
var boxPlot = document.getElementById('PerMetricBar');
var config = {scrollZoom: true, displaylogo: false, showLink: false, showSendToCloud: false, modeBarButtonsToRemove: ['toImage'], responsive: true}
Plotly.newPlot(boxPlot, data, layout, config);
Plotly.newPlot('PerMetricBar', data, layout, {displayModeBar:false}, {staticPlot: true});
boxPlot.on('plotly_click', (eventData) => {
var xAxisHovered
xAxisHovered = eventData.points[0].x
var index
if (xAxisHovered == 'Accuracy') {
Plotly.restyle(boxPlot, 'x', [['<b>Accuracy</b>','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 0
}
else if (xAxisHovered == 'MAE') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','<b>MAE</b>','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 1
}
else if (xAxisHovered == 'RMSE') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','<b>RMSE</b>','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 2
}
else if (xAxisHovered == 'G-Mean') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','<b>G-Mean</b>','<b>G-Mean</b>','<b>G-Mean</b>','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 3
}
else if (xAxisHovered == 'Precision') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','<b>Precision</b>','<b>Precision</b>','<b>Precision</b>','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 4
}
else if (xAxisHovered == 'Recall') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','<b>Recall</b>','<b>Recall</b>','<b>Recall</b>','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','Log Loss']]);
index = 5
}
else if (xAxisHovered == 'F-Beta Sc') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','<b>F-Beta Sc</b>','MCC','ROC AUC','Log Loss']]);
index = 6
}
else if (xAxisHovered == 'MCC') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','<b>MCC</b>','ROC AUC','Log Loss']]);
index = 7
}
else if (xAxisHovered == 'ROC AUC') {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','<b>ROC AUC</b>','Log Loss']]);
index = 8
}
else {
Plotly.restyle(boxPlot, 'x', [['Accuracy','MAE','RMSE','G-Mean','G-Mean','G-Mean','Precision','Precision','Precision','Recall','Recall','Recall','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','F-Beta Sc','MCC','ROC AUC','<b>Log Loss</b>']]);
index = 9
}
EventBus.$emit('updateMetricsScatter', resultsColors[index])
});
},
reset () {
Plotly.purge('PerMetricBar')
@ -82,14 +163,22 @@ export default {
mounted () {
EventBus.$on('InitializeMetricsBarChart', data => {this.barchartmetrics = data;})
EventBus.$on('InitializeMetricsBarChart', this.LineBar)
EventBus.$on('Responsive', data => {
this.WH = data})
EventBus.$on('ResponsiveandChange', data => {
this.WH = data})
EventBus.$on('UpdateBarChartperMetric', data => {
this.SelBarChartMetrics = data})
EventBus.$on('UpdateBarChartperMetric', this.LineBar)
EventBus.$on('CallFactorsView', data => {
this.factors = data})
EventBus.$on('CallFactorsView', this.LineBar)
EventBus.$on('updateBoxPlots', this.LineBar)
// reset view
EventBus.$on('resetViews', this.reset)
}

@ -2,10 +2,10 @@
<div>
<b-row class="md-3">
<b-col cols="12">
<div>Projection Selection: <select id="selectBarChartPred" @change="selectVisualRepresentationPred()">
<option value="mds" selected>MDS Projection</option>
<option value="tsne">t-SNE Projection</option>
<option value="umap">UMAP Projection</option>
<div>Projection Method: <select id="selectBarChartPred" @change="selectVisualRepresentationPred()">
<option value="mds" selected>MDS</option>
<option value="tsne">t-SNE</option>
<option value="umap">UMAP</option>
</select>
<div id="OverviewPredPlotly" class="OverviewPredPlotly"></div>
</div>
@ -39,6 +39,15 @@ export default {
reset () {
Plotly.purge('OverviewPredPlotly')
},
clean(obj) {
var propNames = Object.getOwnPropertyNames(obj);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (obj[propName] === null || obj[propName] === undefined) {
delete obj[propName];
}
}
},
ScatterPlotPredView () {
Plotly.purge('OverviewPredPlotly')
@ -52,6 +61,11 @@ export default {
const DataSetY = JSON.parse(this.PredictionsData[15])
const originalDataLabels = JSON.parse(this.PredictionsData[16])
var DataSetParse = JSON.parse(DataSet)
var stringParameters = []
for (let i = 0; i < DataSetParse.length; i++) {
this.clean(DataSetParse[i])
stringParameters.push(JSON.stringify(DataSetParse[i]).replace(/,/gi, '<br>'))
}
const XandYCoordinatesTSNE = JSON.parse(this.PredictionsData[18])
const XandYCoordinatesUMAP= JSON.parse(this.PredictionsData[19])
@ -80,7 +94,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -98,7 +112,7 @@ export default {
}
layout = {
title: 'Predictions Space Projection (MDS)',
title: 'MDS Projection',
xaxis: {
visible: false
},
@ -110,6 +124,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else if (this.representationDef == 'tsne') {
result = XandYCoordinatesTSNE.reduce(function(r, a) {
@ -140,7 +161,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -158,7 +179,7 @@ export default {
}
layout = {
title: 'Prediction Space Projection (t-SNE)',
title: 't-SNE Projection',
xaxis: {
visible: false
},
@ -170,6 +191,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else {
for (let i = 0; i < XandYCoordinatesUMAP[0].length; i++) {
@ -190,7 +218,7 @@ export default {
const aux_ID = result.ID.filter((item, index) => originalDataLabels[index] == target_names[i]);
var Text = aux_ID.map((item, index) => {
let popup = 'Data Point ID: ' + item + '; Details: ' + JSON.stringify(DataSetParse[item])
let popup = 'Data Point ID: ' + item + '<br> Details: ' + stringParameters[item]
return popup;
});
@ -208,7 +236,7 @@ export default {
}
layout = {
title: 'Predictions Space Projection (UMAP)',
title: 'UMAP Projection',
xaxis: {
visible: false
},
@ -220,6 +248,13 @@ export default {
autosize: true,
width: width,
height: height,
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
}

@ -37,6 +37,7 @@ export default {
ExtraTModels: 2606,
AdaBModels: 2766,
GradBModels: 2926,
AllDetails: '',
platform: ''
}
},
@ -48,6 +49,15 @@ export default {
this.platform.clear();
}
},
clean(obj) {
var propNames = Object.getOwnPropertyNames(obj);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (obj[propName] === null || obj[propName] === undefined) {
delete obj[propName];
}
}
},
provenance () {
var canvas = document.getElementById("main-canvas");
var width = this.WH[0]*9 // interactive visualization
@ -66,6 +76,18 @@ export default {
var flagGradB = 0
var StackInfo = JSON.parse(this.stackInformation[1])
var parameters = JSON.parse(this.AllDetails[2])
var parameters = JSON.parse(parameters)
var stringParameters = []
var temp = 0
for (let i = 0; i < StackInfo.length; i++) {
this.clean(parameters[i])
temp = JSON.stringify(Object.assign({ID: StackInfo[i]}, parameters[i]))
stringParameters.push(temp)
}
// Create a WebGL 2D platform on the canvas:
this.platform = Stardust.platform("webgl-2d", canvas, width, height);
@ -211,11 +233,14 @@ export default {
isotypes.data(this.data);
EventBus.$emit('ExtractResults', stringParameters)
isotypes.render();
this.counter = this.counter + 1
}
},
mounted () {
EventBus.$on('ParametersProvenance', data => {this.AllDetails = data})
EventBus.$on('InitializeProvenance', data => {this.stackInformation = data})
EventBus.$on('InitializeProvenance', this.provenance)
EventBus.$on('Responsive', data => {

@ -21,6 +21,7 @@ export default {
methods: {
resetClass () {
EventBus.$emit('clearPCP')
EventBus.$emit('EraseSelectionBarChart', '')
EventBus.$emit('alternateFlagLock')
EventBus.$emit('boxplotCall', true)
}

@ -1,17 +1,25 @@
<template>
<div>
<div align="center">
Projection Selection: <select id="selectBarChart" @change="selectVisualRepresentation()">
<option value="mds" selected>MDS Projection</option>
<option value="tsne">t-SNE Projection</option>
<option value="umap">UMAP Projection</option>
Projection Method: <select id="selectBarChart" @change="selectVisualRepresentation()">
<option value="mds" selected>MDS</option>
<option value="tsne">t-SNE</option>
<option value="umap">UMAP</option>
</select>
&nbsp;&nbsp;
Action: <button
id="RemoveStack"
v-on:click="RemoveStack">
<font-awesome-icon icon="minus" />
{{ valueStackRemove }}
</button>
&nbsp;&nbsp;
Filter: <button
id="ResetSelection"
v-on:click="resetSelection">
<font-awesome-icon icon="sync-alt" />
{{ valueResetSel }}
</button>
</div>
<div id="OverviewPlotly" class="OverviewPlotly"></div>
</div>
@ -38,14 +46,21 @@ export default {
max: 0,
min: 0,
WH: [],
newColorsUpdate: [],
parametersAll: [],
length: 0,
valueStackRemove: 'Remove from Stack',
valueStackRemove: 'Remove Unselected from Stack',
DataPointsSelUpdate: [],
ModelsIDGray: []
ModelsIDGray: [],
valueResetSel: 'Reset Metric Selection'
}
},
methods: {
resetSelection () {
this.newColorsUpdate = []
this.ScatterPlotView()
EventBus.$emit('updateBoxPlots')
},
reset () {
Plotly.purge('OverviewPlotly')
},
@ -57,10 +72,27 @@ export default {
RemoveStack () {
EventBus.$emit('RemoveFromStack')
},
clean(obj) {
var propNames = Object.getOwnPropertyNames(obj);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (obj[propName] === null || obj[propName] === undefined) {
delete obj[propName];
}
}
},
ScatterPlotView () {
Plotly.purge('OverviewPlotly')
var colorsforScatterPlot = JSON.parse(this.ScatterPlotResults[0])
if (this.newColorsUpdate.length != 0) {
let resultsClear = JSON.parse(this.newColorsUpdate)
for (let j = 0; j < Object.values(resultsClear).length; j++) {
colorsforScatterPlot.push(Object.values(resultsClear)[j])
}
}
var MDSData = JSON.parse(this.ScatterPlotResults[1])
var parameters = JSON.parse(this.ScatterPlotResults[2])
var TSNEData = JSON.parse(this.ScatterPlotResults[12])
@ -69,7 +101,12 @@ export default {
EventBus.$emit('sendPointsNumber', modelId.length)
parameters = JSON.parse(parameters)
var parameters = JSON.parse(parameters)
var stringParameters = []
for (let i = 0; i < parameters.length; i++) {
this.clean(parameters[i])
stringParameters.push(JSON.stringify(parameters[i]).replace(/,/gi, '<br>'))
}
if (this.colorsforOver.length != 0) {
if (this.colorsforOver[1].length != 0) {
@ -83,14 +120,14 @@ export default {
var classifiersInfoProcessing = []
for (let i = 0; i < modelId.length; i++) {
classifiersInfoProcessing[i] = 'Model ID: ' + modelId[i] + '; Details: ' + JSON.stringify(parameters[i])
classifiersInfoProcessing[i] = 'Model ID: ' + modelId[i] + '<br> Details: ' + stringParameters[i]
}
var listofNumbersModelsIDs = []
var StackModelsIDs = []
if (this.ModelsIDGray.length != 0) {
for (let j = 0; j < this.ModelsIDGray.length; j++){
listofNumbersModelsIDs.push(parseInt(this.ModelsIDGray[j].replace(/\D/g, "")))
listofNumbersModelsIDs.push(parseInt(this.ModelsIDGray[j]))
}
var parametersNew = []
@ -110,12 +147,12 @@ export default {
EventBus.$emit('sendPointsNumber', StackModelsIDs.length)
var classifiersInfoProcessing = []
for (let i = 0; i < StackModelsIDs.length; i++) {
classifiersInfoProcessing[i] = 'Model ID: ' + StackModelsIDs[i] + '; Details: ' + JSON.stringify(parametersNew[i])
classifiersInfoProcessing[i] = 'Model ID: ' + StackModelsIDs[i] + '; Details: ' + stringParameters[i]
}
MDSData[0] = MDSDataNewX
MDSData[1] = MDSDataNewY
colorsforScatterPlot = colorsforScatterPlotNew
EventBus.$emit('NewHeatmapAccordingtoNewStack', StackModelsIDs)
//EventBus.$emit('NewHeatmapAccordingtoNewStack', StackModelsIDs)
}
var DataGeneral
@ -124,6 +161,9 @@ export default {
var maxY
var minY
var width = this.WH[0]*6.5 // interactive visualization
var height = this.WH[1]*1.22 // interactive visualization
var layout
if (this.representationDef == 'mds') {
maxX = Math.max(MDSData[0])
@ -146,16 +186,14 @@ export default {
size: 12,
colorscale: 'Viridis',
colorbar: {
title: 'Metrics Average',
title: '# Performance (%) #',
titleside: 'Top'
},
}
}]
var width = this.WH[0]*6.5 // interactive visualization
var height = this.WH[1]*1.22 // interactive visualization
layout = {
title: 'Models Performance (MDS)',
title: 'MDS Projection',
xaxis: {
visible: false,
range: [minX, maxX]
@ -171,6 +209,13 @@ export default {
hovermode: "closest",
hoverlabel: { bgcolor: "#FFF" },
legend: {orientation: 'h', y: -0.3},
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else if (this.representationDef == 'tsne') {
var result = TSNEData.reduce(function(r, a) {
@ -183,7 +228,7 @@ export default {
})
return r;
}, {})
maxX = Math.max(result.Xax)
minX = Math.min(result.Xax)
maxY = Math.max(result.Yax)
@ -209,7 +254,7 @@ export default {
}
}]
layout = {
title: 'Models Performance (t-SNE)',
title: 't-SNE Projection',
xaxis: {
visible: false,
range: [minX, maxX]
@ -225,6 +270,13 @@ export default {
hovermode: "closest",
hoverlabel: { bgcolor: "#FFF" },
legend: {orientation: 'h', y: -0.3},
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
} else {
@ -254,10 +306,8 @@ export default {
}
}]
var width = this.WH[0]*6.5 // interactive visualization
var height = this.WH[1]*1 // interactive visualization
layout = {
title: 'Models Performance (UMAP)',
title: 'UMAP Projection',
xaxis: {
visible: false,
range: [minX, maxX]
@ -273,6 +323,13 @@ export default {
hovermode: "closest",
hoverlabel: { bgcolor: "#FFF" },
legend: {orientation: 'h', y: -0.3},
margin: {
l: 50,
r: 0,
b: 30,
t: 40,
pad: 0
},
}
}
@ -286,7 +343,9 @@ export default {
},
selectedPointsOverview () {
const OverviewPlotly = document.getElementById('OverviewPlotly')
var allModels = JSON.parse(this.ScatterPlotResults[13])
OverviewPlotly.on('plotly_selected', function (evt) {
var pushModelsRemaining = []
if (typeof evt !== 'undefined') {
const ClassifierIDsList = []
const ClassifierIDsListCleared = []
@ -302,9 +361,14 @@ export default {
ClassifierIDsListCleared.push(numberNumb)
}
}
if (ClassifierIDsList != '') {
EventBus.$emit('SendSelectedPointsToServerEvent', ClassifierIDsList)
EventBus.$emit('SendSelectedPointsToBrushHeatmap', ClassifierIDsListCleared)
for (let i = 0; i < allModels.length; i++) {
if (!ClassifierIDsListCleared.includes(allModels[i])) {
pushModelsRemaining.push(allModels[i])
}
}
if (allModels != '') {
EventBus.$emit('SendSelectedPointsToServerEvent', pushModelsRemaining)
EventBus.$emit('SendSelectedPointsToBrushHeatmap', pushModelsRemaining)
} else {
EventBus.$emit('SendSelectedPointsToServerEvent', '')
}
@ -357,8 +421,12 @@ export default {
}
},
mounted() {
EventBus.$on('updateMetricsScatter', data => { this.newColorsUpdate = data })
EventBus.$on('updateMetricsScatter', this.ScatterPlotView)
EventBus.$on('GrayOutPoints', data => { this.ModelsIDGray = data })
EventBus.$on('GrayOutPoints', this.ScatterPlotView)
EventBus.$on('emittedEventCallingBrushedBoxPlot', data => {
this.brushedBox = data})
EventBus.$on('emittedEventCallingScatterPlot', data => {

@ -0,0 +1,24 @@
#!/usr/bin/env python
import sys
import pandas as pd
import pymongo
import json
import os
def import_content(filepath):
mng_client = pymongo.MongoClient('localhost', 27017)
mng_db = mng_client['mydb']
collection_name = 'DiabetesC'
db_cm = mng_db[collection_name]
cdir = os.path.dirname(__file__)
file_res = os.path.join(cdir, filepath)
data = pd.read_csv(file_res)
data_json = json.loads(data.to_json(orient='records'))
db_cm.remove()
db_cm.insert(data_json)
if __name__ == "__main__":
filepath = '/Users/anchaa/Documents/Research/StackVis_code/StackVis/diabetes.csv'
import_content(filepath)
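The importer above uses pymongo's legacy Collection.remove() and Collection.insert() calls, which are deprecated in pymongo 3+. A minimal sketch of the same behaviour with the current API (delete_many / insert_many), reusing the 'mydb' database and 'DiabetesC' collection names from the script above:

# Sketch only: equivalent to import_content() above, using the
# non-deprecated pymongo API.
import json
import os
import pandas as pd
import pymongo

def import_content(filepath):
    client = pymongo.MongoClient('localhost', 27017)
    collection = client['mydb']['DiabetesC']          # names taken from the script above
    file_res = os.path.join(os.path.dirname(__file__), filepath)
    records = json.loads(pd.read_csv(file_res).to_json(orient='records'))
    collection.delete_many({})                        # replaces the deprecated remove()
    collection.insert_many(records)                   # replaces the deprecated insert()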

@ -0,0 +1,152 @@
"sepal_length","sepal_width","petal_length","petal_width","Species*"
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5.0,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5.0,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3.0,1.4,0.1,Iris-setosa
4.3,3.0,1.1,0.1,Iris-setosa
5.8,4.0,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1.0,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5.0,3.0,1.6,0.2,Iris-setosa
5.0,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.0,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3.0,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5.0,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5.0,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3.0,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5.0,3.3,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1.0,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5.0,2.0,3.5,1.0,Iris-versicolor
5.9,3.0,4.2,1.5,Iris-versicolor
6.0,2.2,4.0,1.0,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3.0,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1.0,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4.0,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3.0,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3.0,5.0,1.7,Iris-versicolor
6.0,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1.0,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1.0,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6.0,2.7,5.1,1.6,Iris-versicolor
5.4,3.0,4.5,1.5,Iris-versicolor
6.0,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3.0,4.1,1.3,Iris-versicolor
5.5,2.5,4.0,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3.0,4.6,1.4,Iris-versicolor
5.8,2.6,4.0,1.2,Iris-versicolor
5.0,2.3,3.3,1.0,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3.0,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3.0,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
7.1,3.0,5.9,2.1,Iris-virginica
6.3,2.9,5.6,1.8,Iris-virginica
6.5,3.0,5.8,2.2,Iris-virginica
7.6,3.0,6.6,2.1,Iris-virginica
4.9,2.5,4.5,1.7,Iris-virginica
7.3,2.9,6.3,1.8,Iris-virginica
6.7,2.5,5.8,1.8,Iris-virginica
7.2,3.6,6.1,2.5,Iris-virginica
6.5,3.2,5.1,2.0,Iris-virginica
6.4,2.7,5.3,1.9,Iris-virginica
6.8,3.0,5.5,2.1,Iris-virginica
5.7,2.5,5.0,2.0,Iris-virginica
5.8,2.8,5.1,2.4,Iris-virginica
6.4,3.2,5.3,2.3,Iris-virginica
6.5,3.0,5.5,1.8,Iris-virginica
7.7,3.8,6.7,2.2,Iris-virginica
7.7,2.6,6.9,2.3,Iris-virginica
6.0,2.2,5.0,1.5,Iris-virginica
6.9,3.2,5.7,2.3,Iris-virginica
5.6,2.8,4.9,2.0,Iris-virginica
7.7,2.8,6.7,2.0,Iris-virginica
6.3,2.7,4.9,1.8,Iris-virginica
6.7,3.3,5.7,2.1,Iris-virginica
7.2,3.2,6.0,1.8,Iris-virginica
6.2,2.8,4.8,1.8,Iris-virginica
6.1,3.0,4.9,1.8,Iris-virginica
6.4,2.8,5.6,2.1,Iris-virginica
7.2,3.0,5.8,1.6,Iris-virginica
7.4,2.8,6.1,1.9,Iris-virginica
7.9,3.8,6.4,2.0,Iris-virginica
6.4,2.8,5.6,2.2,Iris-virginica
6.3,2.8,5.1,1.5,Iris-virginica
6.1,2.6,5.6,1.4,Iris-virginica
7.7,3.0,6.1,2.3,Iris-virginica
6.3,3.4,5.6,2.4,Iris-virginica
6.4,3.1,5.5,1.8,Iris-virginica
6.0,3.0,4.8,1.8,Iris-virginica
6.9,3.1,5.4,2.1,Iris-virginica
6.7,3.1,5.6,2.4,Iris-virginica
6.9,3.1,5.1,2.3,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
6.8,3.2,5.9,2.3,Iris-virginica
6.7,3.3,5.7,2.5,Iris-virginica
6.7,3.0,5.2,2.3,Iris-virginica
6.3,2.5,5.0,1.9,Iris-virginica
6.5,3.0,5.2,2.0,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
5.9,3.0,5.1,1.8,Iris-virginica
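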

476
run.py

@ -22,6 +22,8 @@ from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
@ -407,6 +409,7 @@ def callPreResults():
global XData
global yData
global target_names
global allParametersPerformancePerModel
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
@ -425,6 +428,7 @@ def callPreResults():
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(allParametersPerformancePerModel)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
@ -551,7 +555,7 @@ memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
print('test')
print('start')
# instantiate spark session
spark = (
SparkSession
@ -632,6 +636,12 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
loop = 10
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
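The num_cores / Parallel lines above spread the leave-one-instance-out influence computation across all CPU cores with joblib. A condensed sketch of the pattern, assuming the processInput helper added later in this diff (drop one instance, re-run cross-validation, keep the mean accuracy):

# Sketch of the per-instance influence pattern used above.
import multiprocessing
from joblib import Parallel, delayed
from sklearn import model_selection

def process_input(index_value, XData, yData, cross_validation, clf):
    # Remove one instance (positionally, as in the processInput helper below)
    X_reduced = XData.drop(index_value)
    y_reduced = [y for i, y in enumerate(yData) if i != index_value]
    scores = model_selection.cross_val_score(
        clf, X_reduced, y_reduced, cv=cross_validation, scoring='accuracy', n_jobs=-1)
    return scores.mean()

num_cores = multiprocessing.cpu_count()
impDataInst = Parallel(n_jobs=num_cores)(
    delayed(process_input)(i, XData, yData, crossValidation, clf)
    for i in range(len(XData)))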
@ -727,9 +737,11 @@ def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd):
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.clip(lower=0)
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(impDataInst)) # Position: 8 and so on
return results
@ -749,7 +761,7 @@ def Remove(duplicate):
if np.isnan(num):
pass
else:
final_list.append(int(num))
final_list.append(float(num))
else:
final_list.append(num)
return final_list
@ -876,16 +888,16 @@ def UpdateOverview():
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[14])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[54])
dicRF = json.loads(allParametersPerformancePerModel[62])
dicExtraT = json.loads(allParametersPerformancePerModel[70])
dicAdaB = json.loads(allParametersPerformancePerModel[78])
dicGradB = json.loads(allParametersPerformancePerModel[86])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
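The remapped indices in this and the following lookups (6, 15, 24, ... instead of 6, 14, 22, ...) are consistent with GridSearchForModels now returning nine entries per algorithm rather than eight, the ninth being the per-instance influence list appended at position 8. Under that assumption the lookups follow a simple stride; a small illustrative helper (names hypothetical):

# Assumption: each algorithm block in allParametersPerformancePerModel now
# holds 9 entries (was 8), so entry k of algorithm a sits at a*9 + k.
ENTRIES_PER_ALGORITHM = 9
ALGORITHM_ORDER = ['KNN', 'SVC', 'GausNB', 'MLP', 'LR', 'LDA', 'QDA',
                   'RF', 'ExtraT', 'AdaB', 'GradB']

def lookup(results, algorithm, entry):
    # e.g. lookup(results, 'SVC', 6) -> results[15], as in the lines above
    return results[ALGORITHM_ORDER.index(algorithm) * ENTRIES_PER_ALGORITHM + entry]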
@ -927,16 +939,16 @@ def PreprocessingMetrics():
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[23])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[63])
dicExtraT = json.loads(allParametersPerformancePerModel[71])
dicAdaB = json.loads(allParametersPerformancePerModel[79])
dicGradB = json.loads(allParametersPerformancePerModel[87])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
@ -986,20 +998,19 @@ def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
ModelsList.append(temp[0])
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[23])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[63])
dicExtraT = json.loads(allParametersPerformancePerModel[71])
dicAdaB = json.loads(allParametersPerformancePerModel[79])
dicGradB = json.loads(allParametersPerformancePerModel[87])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
@ -1047,8 +1058,7 @@ def PreprocessingPredUpdate(Models):
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemaining = df_concatProbsCleared.index.values.tolist()
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
@ -1059,31 +1069,27 @@ def PreprocessingPredUpdate(Models):
PredictionSpaceSel = FunMDS(predictionsSel)
#ModelSpaceMDSNewComb = [list(a) for a in zip(PredictionSpaceAll[0], ModelSpaceMDS[1])]
#ModelSpaceMDSNewSel = FunMDS(df_concatMetrics)
#ModelSpaceMDSNewSelComb = [list(a) for a in zip(ModelSpaceMDSNewSel[0], ModelSpaceMDSNewSel[1])]
mtx2PredFinal = []
mtx1Pred, mtx2Pred, disparity2 = procrustes(PredictionSpaceAll, PredictionSpaceSel)
a1, b1 = zip(*mtx2Pred)
a1 = [i[1] for i in mtx2Pred]
b1 = [i[0] for i in mtx2Pred]
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemaining]
return [mtx2PredFinal,listIDsRemoved]
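The block above Procrustes-aligns the projection of the selected models onto the projection of all models before returning it together with the kept model IDs. A minimal sketch with scipy, assuming both projections are same-shaped n×2 coordinate arrays:

# Sketch: align the "selected" projection to the "all models" projection,
# then split the aligned matrix back into x/y lists as done above.
import numpy as np
from scipy.spatial import procrustes

def align_projection(space_all, space_sel):
    mtx_all, mtx_sel, disparity = procrustes(np.asarray(space_all), np.asarray(space_sel))
    xs = [point[1] for point in mtx_sel]   # same column ordering as the code above
    ys = [point[0] for point in mtx_sel]
    return [xs, ys], disparity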
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[9])
dicGausNB = json.loads(allParametersPerformancePerModel[17])
dicMLP = json.loads(allParametersPerformancePerModel[25])
dicLR = json.loads(allParametersPerformancePerModel[33])
dicLDA = json.loads(allParametersPerformancePerModel[41])
dicQDA = json.loads(allParametersPerformancePerModel[49])
dicRF = json.loads(allParametersPerformancePerModel[57])
dicExtraT = json.loads(allParametersPerformancePerModel[65])
dicAdaB = json.loads(allParametersPerformancePerModel[73])
dicGradB = json.loads(allParametersPerformancePerModel[81])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
@ -1150,16 +1156,16 @@ def PreprocessingParam():
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[9])
dicGausNB = json.loads(allParametersPerformancePerModel[17])
dicMLP = json.loads(allParametersPerformancePerModel[25])
dicLR = json.loads(allParametersPerformancePerModel[33])
dicLDA = json.loads(allParametersPerformancePerModel[41])
dicQDA = json.loads(allParametersPerformancePerModel[49])
dicRF = json.loads(allParametersPerformancePerModel[57])
dicExtraT = json.loads(allParametersPerformancePerModel[65])
dicAdaB = json.loads(allParametersPerformancePerModel[73])
dicGradB = json.loads(allParametersPerformancePerModel[81])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
@ -1225,16 +1231,16 @@ def PreprocessingParamSep():
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[18])
dicMLP = json.loads(allParametersPerformancePerModel[26])
dicLR = json.loads(allParametersPerformancePerModel[34])
dicLDA = json.loads(allParametersPerformancePerModel[42])
dicQDA = json.loads(allParametersPerformancePerModel[50])
dicRF = json.loads(allParametersPerformancePerModel[58])
dicExtraT = json.loads(allParametersPerformancePerModel[66])
dicAdaB = json.loads(allParametersPerformancePerModel[74])
dicGradB = json.loads(allParametersPerformancePerModel[82])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
@ -1277,16 +1283,16 @@ def preProcessPerClassM():
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[27])
dicLR = json.loads(allParametersPerformancePerModel[35])
dicLDA = json.loads(allParametersPerformancePerModel[43])
dicQDA = json.loads(allParametersPerformancePerModel[51])
dicRF = json.loads(allParametersPerformancePerModel[59])
dicExtraT = json.loads(allParametersPerformancePerModel[67])
dicAdaB = json.loads(allParametersPerformancePerModel[75])
dicGradB = json.loads(allParametersPerformancePerModel[83])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
@ -1329,16 +1335,16 @@ def preProcessFeatAcc():
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[36])
dicLDA = json.loads(allParametersPerformancePerModel[44])
dicQDA = json.loads(allParametersPerformancePerModel[52])
dicRF = json.loads(allParametersPerformancePerModel[60])
dicExtraT = json.loads(allParametersPerformancePerModel[68])
dicAdaB = json.loads(allParametersPerformancePerModel[76])
dicGradB = json.loads(allParametersPerformancePerModel[84])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
@ -1388,6 +1394,9 @@ def preProcessFeatSc():
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics.loc[:, 'mean_test_neg_mean_absolute_error'] = loopThroughMetrics.loc[:, 'mean_test_neg_mean_absolute_error'] + 1
loopThroughMetrics.loc[:, 'mean_test_neg_root_mean_squared_error'] = loopThroughMetrics.loc[:, 'mean_test_neg_root_mean_squared_error'] + 1
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
@ -1396,44 +1405,45 @@ def preProcsumPerMetric(factors):
if sum(factors) is 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors))
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
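The adjustments above put every metric on a "higher is better" footing before averaging: the negated error metrics are shifted by +1, log loss is flipped to 1 − value, and the factor-weighted mean is reported as a percentage. A compact sketch of that per-model score (factors is assumed to align positionally with the metric columns):

# Sketch of the per-model summary score computed above.
import pandas as pd

def summed_score(metrics_row: pd.Series, factors) -> float:
    row = metrics_row.copy()
    row['mean_test_neg_mean_absolute_error'] += 1       # negated error -> shifted up
    row['mean_test_neg_root_mean_squared_error'] += 1
    row['log_loss'] = 1 - row['log_loss']                # invert so higher is better
    if sum(factors) == 0:
        return 0.0
    return float((row * factors).sum() / sum(factors) * 100)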
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'].sum()/loopThroughMetrics['mean_test_accuracy'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_mean_absolute_error'].sum()/loopThroughMetrics['mean_test_neg_mean_absolute_error'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_root_mean_squared_error'].sum()/loopThroughMetrics['mean_test_neg_root_mean_squared_error'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'].sum()/loopThroughMetrics['geometric_mean_score_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'].sum()/loopThroughMetrics['geometric_mean_score_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'].sum()/loopThroughMetrics['geometric_mean_score_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'].sum()/loopThroughMetrics['mean_test_precision_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'].sum()/loopThroughMetrics['mean_test_precision_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'].sum()/loopThroughMetrics['mean_test_precision_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'].sum()/loopThroughMetrics['mean_test_recall_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'].sum()/loopThroughMetrics['mean_test_recall_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'].sum()/loopThroughMetrics['mean_test_recall_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_micro'].sum()/loopThroughMetrics['f5_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_macro'].sum()/loopThroughMetrics['f5_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'].sum()/loopThroughMetrics['f5_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_micro'].sum()/loopThroughMetrics['f1_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_macro'].sum()/loopThroughMetrics['f1_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'].sum()/loopThroughMetrics['f1_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_micro'].sum()/loopThroughMetrics['f2_micro'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_macro'].sum()/loopThroughMetrics['f2_macro'].count())
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'].sum()/loopThroughMetrics['f2_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'].sum()/loopThroughMetrics['matthews_corrcoef'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'].sum()/loopThroughMetrics['mean_test_roc_auc_ovo_weighted'].count())
metricsPerModelColl.append(loopThroughMetrics['log_loss'].sum()/loopThroughMetrics['log_loss'].count())
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_mean_absolute_error'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_neg_root_mean_squared_error'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
for index, metric in enumerate(metricsPerModelColl):
if (index == 1 or index == 2):
metricsPerModelColl[index] = (metric + 1)*factors[index]
metricsPerModelColl[index] = ((metric + 1)*factors[index]) * 100
elif (index == 23):
metricsPerModelColl[index] = (1 - metric)*factors[index]
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = metric*factors[index]
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
@ -1580,22 +1590,60 @@ def ComputeMetricsForSel(Models):
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofModels.append(temp[0])
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'].sum()/MetricsAlltoSel['mean_test_accuracy'].count())
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_f1_macro'].sum()/MetricsAlltoSel['mean_test_f1_macro'].count())
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision'].sum()/MetricsAlltoSel['mean_test_precision'].count())
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall'].sum()/MetricsAlltoSel['mean_test_recall'].count())
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_jaccard'].sum()/MetricsAlltoSel['mean_test_jaccard'].count())
for index, metric in enumerate(metricsPerModelCollSel):
metricsPerModelCollSel[index] = metric*factors[index]
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_neg_mean_absolute_error'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_neg_root_mean_squared_error'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 1 or index == 2):
metricsPerModelCollSel[index] = (metric + 1)*factors[index]
elif (index == 23):
metricsPerModelCollSel[index] = (1 - metric)*factors[index]
else:
metricsPerModelCollSel[index] = metric*factors[index]
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
# initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
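For reference, the same order-preserving de-duplication can be written in one line on Python 3.7+, where dicts keep insertion order:

unique_list = list(dict.fromkeys(list1))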
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
@ -1608,13 +1656,14 @@ def SendToUpdateBarChart():
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
@ -1644,7 +1693,6 @@ def RetrieveSelDataPoints():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
@ -1740,7 +1788,6 @@ def RetrieveSelDataPoints():
if (len(paramsListSeptoDicGradB['n_estimators']) is 0):
RetrieveParamsClearedListGradB = []
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
@ -1787,8 +1834,7 @@ def RetrieveSelDataPoints():
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints)
if (len(metricsSelList[0]) != 0 and len(metricsSelList[1]) != 0 and len(metricsSelList[2]) != 0 and len(metricsSelList[3]) != 0 and len(metricsSelList[4]) != 0 and len(metricsSelList[5]) != 0 and len(metricsSelList[6]) != 0 and len(metricsSelList[7]) != 0 and len(metricsSelList[8]) != 0 and len(metricsSelList[9]) != 0 and len(metricsSelList[10])):
if (len(metricsSelList[0]) != 0 and len(metricsSelList[1]) != 0 and len(metricsSelList[2]) != 0 and len(metricsSelList[3]) != 0 and len(metricsSelList[4]) != 0 and len(metricsSelList[5]) != 0 and len(metricsSelList[6]) != 0 and len(metricsSelList[7]) != 0 and len(metricsSelList[8]) != 0 and len(metricsSelList[9]) != 0 and len(metricsSelList[10]) != 0):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
@ -2023,10 +2069,12 @@ def RetrieveSelDataPoints():
dfAdaBCleared = dfGradB.drop(dfGradB.index[set_diff_df])
df_concatMetrics = dfAdaBCleared
df_concatMetrics.loc[:, 'mean_test_neg_mean_absolute_error'] = df_concatMetrics.loc[:, 'mean_test_neg_mean_absolute_error'] + 1
df_concatMetrics.loc[:, 'mean_test_neg_root_mean_squared_error'] = df_concatMetrics.loc[:, 'mean_test_neg_root_mean_squared_error'] + 1
df_concatMetrics.loc[:, 'log_loss'] = 1 - df_concatMetrics.loc[:, 'log_loss']
global sumPerClassifierSelUpdate
sumPerClassifierSelUpdate = []
sumPerClassifierSelUpdate = preProcsumPerMetricAccordingtoData(factors, df_concatMetrics)
ModelSpaceMDSNewComb = [list(a) for a in zip(ModelSpaceMDS[0], ModelSpaceMDS[1])]
ModelSpaceMDSNewSel = FunMDS(df_concatMetrics)
@ -2195,15 +2243,13 @@ def preProcsumPerMetricAccordingtoData(factors, loopThroughMetrics):
sumPerClassifier = []
for row in loopThroughMetrics.iterrows():
rowSum = 0
lengthFactors = len(scoring)
name, values = row
for loop, elements in enumerate(values):
lengthFactors = lengthFactors - 1 + factors[loop]
rowSum = elements*factors[loop] + rowSum
if lengthFactors is 0:
if sum(factors) is 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/lengthFactors)
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
# Sending the overview classifiers' results to be visualized as a scatterplot
@ -2242,6 +2288,7 @@ def EnsembleModel(Models, keyRetrieved):
global XData
global yData
global sclf
lr = LogisticRegression()
@ -2258,75 +2305,75 @@ def EnsembleModel(Models, keyRetrieved):
arg = dfParamKNNFilt[eachelem]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), KNeighborsClassifier().set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[9])
temp = json.loads(allParametersPerformancePerModel[10])
dfParamSVC = pd.DataFrame.from_dict(temp)
dfParamSVCFilt = dfParamSVC.iloc[:,0]
for eachelem in SVCModels:
arg = dfParamSVCFilt[eachelem-SVCModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), SVC(probability=True).set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), SVC(probability=True,random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[17])
temp = json.loads(allParametersPerformancePerModel[19])
dfParamGauNB = pd.DataFrame.from_dict(temp)
dfParamGauNBFilt = dfParamGauNB.iloc[:,0]
for eachelem in GausNBModels:
arg = dfParamGauNBFilt[eachelem-GausNBModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), GaussianNB().set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[25])
temp = json.loads(allParametersPerformancePerModel[28])
dfParamMLP = pd.DataFrame.from_dict(temp)
dfParamMLPFilt = dfParamMLP.iloc[:,0]
for eachelem in MLPModels:
arg = dfParamMLPFilt[eachelem-MLPModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), MLPClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), MLPClassifier(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[33])
temp = json.loads(allParametersPerformancePerModel[37])
dfParamLR = pd.DataFrame.from_dict(temp)
dfParamLRFilt = dfParamLR.iloc[:,0]
for eachelem in LRModels:
arg = dfParamLRFilt[eachelem-LRModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), LogisticRegression().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), LogisticRegression(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[41])
temp = json.loads(allParametersPerformancePerModel[46])
dfParamLDA = pd.DataFrame.from_dict(temp)
dfParamLDAFilt = dfParamLDA.iloc[:,0]
for eachelem in LDAModels:
arg = dfParamLDAFilt[eachelem-LDAModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), LinearDiscriminantAnalysis().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), LinearDiscriminantAnalysis(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[49])
temp = json.loads(allParametersPerformancePerModel[55])
dfParamQDA = pd.DataFrame.from_dict(temp)
dfParamQDAFilt = dfParamQDA.iloc[:,0]
for eachelem in QDAModels:
arg = dfParamQDAFilt[eachelem-QDAModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), QuadraticDiscriminantAnalysis().set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[57])
temp = json.loads(allParametersPerformancePerModel[64])
dfParamRF = pd.DataFrame.from_dict(temp)
dfParamRFFilt = dfParamRF.iloc[:,0]
for eachelem in RFModels:
arg = dfParamRFFilt[eachelem-RFModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), RandomForestClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), RandomForestClassifier(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[65])
temp = json.loads(allParametersPerformancePerModel[73])
dfParamExtraT = pd.DataFrame.from_dict(temp)
dfParamExtraTFilt = dfParamExtraT.iloc[:,0]
for eachelem in ExtraTModels:
arg = dfParamExtraTFilt[eachelem-ExtraTModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), ExtraTreesClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), ExtraTreesClassifier(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[73])
temp = json.loads(allParametersPerformancePerModel[82])
dfParamAdaB = pd.DataFrame.from_dict(temp)
dfParamAdaBFilt = dfParamAdaB.iloc[:,0]
for eachelem in AdaBModels:
arg = dfParamAdaBFilt[eachelem-AdaBModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), AdaBoostClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), AdaBoostClassifier(random_state=RANDOM_SEED).set_params(**arg)))
temp = json.loads(allParametersPerformancePerModel[81])
temp = json.loads(allParametersPerformancePerModel[91])
dfParamGradB = pd.DataFrame.from_dict(temp)
dfParamGradBFilt = dfParamGradB.iloc[:,0]
for eachelem in GradBModels:
arg = dfParamGradBFilt[eachelem-GradBModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), GradientBoostingClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=columnsInit), GradientBoostingClassifier(random_state=RANDOM_SEED).set_params(**arg)))
global sclfStack
sclfStack = 0
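Every per-algorithm block above follows the same pattern: look up the stored grid-search parameters, rebuild the estimator with the fixed RANDOM_SEED for reproducibility, and wrap it in a ColumnSelector pipeline so the stack can later restrict each model to its own feature subset. A condensed sketch of that pattern (RandomForest shown; the other estimators are handled identically):

# Sketch of the pipeline-building pattern repeated above for every algorithm.
from mlxtend.feature_selection import ColumnSelector
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline

def build_pipelines(model_ids, params_per_model, offset, columns, seed):
    pipelines = []
    for model_id in model_ids:
        args = params_per_model[model_id - offset]       # stored grid-search params
        estimator = RandomForestClassifier(random_state=seed).set_params(**args)
        pipelines.append(make_pipeline(ColumnSelector(cols=columns), estimator))
    return pipelines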
@ -2345,8 +2392,7 @@ def EnsembleModel(Models, keyRetrieved):
for index, modHere in enumerate(ModelsAll):
flag = 0
for loop in Models['ClassifiersList']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
if (int(temp[0]) == int(modHere)):
if (int(loop) == int(modHere)):
flag = 1
if (flag is 1):
all_classifiersSelection.append(all_classifiers[index])
@ -2376,12 +2422,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[9])
temp = json.loads(allParametersPerformancePerModel[10])
dfParamSVC = pd.DataFrame.from_dict(temp)
dfParamSVCFilt = dfParamSVC.iloc[:,0]
for index, eachelem in enumerate(SVCModels):
arg = dfParamRFFilt[eachelem-SVCModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), SVC(probability=True).set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), SVC(probability=True,random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2389,7 +2435,7 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[17])
temp = json.loads(allParametersPerformancePerModel[19])
dfParamGauNB = pd.DataFrame.from_dict(temp)
dfParamGauNBFilt = dfParamGauNB.iloc[:,0]
for index, eachelem in enumerate(GausNBModels):
@ -2402,12 +2448,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[25])
temp = json.loads(allParametersPerformancePerModel[28])
dfParamMLP = pd.DataFrame.from_dict(temp)
dfParamMLPFilt = dfParamMLP.iloc[:,0]
for index, eachelem in enumerate(MLPModels):
arg = dfParamMLPFilt[eachelem-MLPModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), MLPClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), MLPClassifier(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2415,12 +2461,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[33])
temp = json.loads(allParametersPerformancePerModel[37])
dfParamLR = pd.DataFrame.from_dict(temp)
dfParamLRFilt = dfParamLR.iloc[:,0]
for index, eachelem in enumerate(LRModels):
arg = dfParamLRFilt[eachelem-LRModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), LogisticRegression().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), LogisticRegression(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2428,12 +2474,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[41])
temp = json.loads(allParametersPerformancePerModel[46])
dfParamLDA = pd.DataFrame.from_dict(temp)
dfParamLDAFilt = dfParamLDA.iloc[:,0]
for index, eachelem in enumerate(LDAModels):
arg = dfParamLDAFilt[eachelem-LDAModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), LinearDiscriminantAnalysis().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), LinearDiscriminantAnalysis(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2441,7 +2487,7 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[49])
temp = json.loads(allParametersPerformancePerModel[55])
dfParamQDA = pd.DataFrame.from_dict(temp)
dfParamQDAFilt = dfParamQDA.iloc[:,0]
for index, eachelem in enumerate(QDAModels):
@ -2454,12 +2500,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[57])
temp = json.loads(allParametersPerformancePerModel[64])
dfParamRF = pd.DataFrame.from_dict(temp)
dfParamRFFilt = dfParamRF.iloc[:,0]
for index, eachelem in enumerate(RFModels):
arg = dfParamRFFilt[eachelem-RFModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), RandomForestClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), RandomForestClassifier(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2467,12 +2513,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[65])
temp = json.loads(allParametersPerformancePerModel[73])
dfParamExtraT = pd.DataFrame.from_dict(temp)
dfParamExtraTFilt = dfParamExtraT.iloc[:,0]
for index, eachelem in enumerate(ExtraTModels):
arg = dfParamExtraTFilt[eachelem-ExtraTModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), ExtraTreesClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), ExtraTreesClassifier(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2480,12 +2526,12 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[73])
temp = json.loads(allParametersPerformancePerModel[82])
dfParamAdaB = pd.DataFrame.from_dict(temp)
dfParamAdaBFilt = dfParamAdaB.iloc[:,0]
for index, eachelem in enumerate(AdaBModels):
arg = dfParamAdaBFilt[eachelem-AdaBModelsCount]
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), AdaBoostClassifier().set_params(**arg)))
all_classifiers.append(make_pipeline(ColumnSelector(cols=featureSelection['featureSelection'][index+store]), AdaBoostClassifier(random_state=RANDOM_SEED).set_params(**arg)))
store = index
flag = 1
@ -2493,7 +2539,7 @@ def EnsembleModel(Models, keyRetrieved):
store = 0
else:
store = store + 1
temp = json.loads(allParametersPerformancePerModel[81])
temp = json.loads(allParametersPerformancePerModel[91])
dfParamGradB = pd.DataFrame.from_dict(temp)
dfParamGradBFilt = dfParamGradB.iloc[:,0]
for index, eachelem in enumerate(GradBModels):
@ -2513,10 +2559,9 @@ def EnsembleModel(Models, keyRetrieved):
for index, modHere in enumerate(ModelsAll):
flag = 0
for loop in Models['ClassifiersList']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
if (int(temp[0]) == int(modHere)):
if (int(loop) == int(modHere)):
flag = 1
if (flag is 0):
if (flag is 1):
all_classifiersSelection.append(all_classifiers[index])
sclfStack = StackingCVClassifier(classifiers=all_classifiersSelection,
@ -2525,6 +2570,7 @@ def EnsembleModel(Models, keyRetrieved):
random_state=RANDOM_SEED,
n_jobs = -1)
#else:
# for index, eachelem in enumerate(algorithmsWithoutDuplicates):
# if (eachelem == 'KNN'):
@ -2540,42 +2586,32 @@ def EnsembleModel(Models, keyRetrieved):
# meta_classifier=lr,
# random_state=RANDOM_SEED,
# n_jobs = -1)
# parallelize all that
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring='accuracy', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
# influence calculation for all the instances
#global DataHeatmap
#DataHeatmap = []
#for indexValue, row in XData.iterrows():
# XDataRemove = XData.copy()
# XDataRemove.drop(indexValue, inplace=True)
# yDataRemove = yData.copy()
# del yDataRemove[indexValue]
# tempRemove = model_selection.cross_val_score(sclf, XDataRemove, yDataRemove, cv=crossValidation, scoring='accuracy', n_jobs=-1)
# DataHeatmap.append(tempRemove.mean())
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring='precision_weighted', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring='recall_weighted', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
temp = model_selection.cross_val_score(sclfStack, XData, yData, cv=crossValidation, scoring='accuracy', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
temp = model_selection.cross_val_score(sclfStack, XData, yData, cv=crossValidation, scoring='precision_weighted', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
temp = model_selection.cross_val_score(sclfStack, XData, yData, cv=crossValidation, scoring='recall_weighted', n_jobs=-1)
scores.append(temp.mean())
scores.append(temp.std())
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted','accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(sclf,sclfStack,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scores = [item for sublist in flat_results for item in sublist]
return 'Okay'
def solve(sclf,sclfStack,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
if (loop < 3):
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
else:
temp = model_selection.cross_val_score(sclfStack, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
def processInput(indexValue,XData,yData,crossValidation,sclf):
XDataRemove = XData.copy()
XDataRemove.drop(indexValue, inplace=True)
yDataRemove = yData.copy()
del yDataRemove[indexValue]
tempRemove = model_selection.cross_val_score(sclf, XDataRemove, yDataRemove, cv=crossValidation, scoring='accuracy', n_jobs=-1)
return tempRemove.mean()
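The refactor above replaces six sequential cross_val_score calls with one joblib map over the scoring names; solve returns a [mean, std] pair per metric, so the nested result is flattened back into the flat scores list the frontend expects. A sketch of that call pattern, reusing the names from the diff above:

# Sketch of the parallel scoring + flattening used above (solve, sclf,
# sclfStack, XData, yData and crossValidation come from run.py).
import multiprocessing
from joblib import Parallel, delayed

scorings = ['accuracy', 'precision_weighted', 'recall_weighted',
            'accuracy', 'precision_weighted', 'recall_weighted']
num_cores = multiprocessing.cpu_count()
pairs = Parallel(n_jobs=num_cores)(
    delayed(solve)(sclf, sclfStack, XData, yData, crossValidation, name, index)
    for index, name in enumerate(scorings))
scores = [value for pair in pairs for value in pair]   # [mean, std, mean, std, ...]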
# Sending the final results to be visualized as a line plot
@app.route('/data/SendFinalResultsBacktoVisualize', methods=["GET", "POST"])
def SendToPlotFinalResults():
@ -2585,13 +2621,13 @@ def SendToPlotFinalResults():
return jsonify(response)
# Sending the final results to be visualized as a line plot
@app.route('/data/SendInstancesImportance', methods=["GET", "POST"])
def SendImportInstances():
global DataHeatmap
response = {
'instancesImportance': DataHeatmap
}
return jsonify(response)
#@app.route('/data/SendInstancesImportance', methods=["GET", "POST"])
#def SendImportInstances():
# global DataHeatmap
# response = {
# 'instancesImportance': DataHeatmap
# }
# return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
