Extend output result & minor fixes #81
@@ -338,20 +338,47 @@ def columnwise_score(y, yp, score_func):
     return score_func(y, yp)


-def accuracy_score(y, yp):
-    return columnwise_score(y, yp, lambda y1, y2: np.mean(y1 == y2))
+def accuracy_score(y_true, y_pred):
+    return columnwise_score(y_true, y_pred, lambda y1, y2: np.mean(y1 == y2))


-def log_loss(y, yp):
+def log_loss(y_true, y_pred):
     from sklearn.metrics import log_loss as sklearn_log_loss
-    y = convert_to_numpy(y)
-    yp = convert_to_numpy(yp)
-    return sklearn_log_loss(y, yp)
+    y_true = convert_to_numpy(y_true)
+    y_pred = convert_to_numpy(y_pred)
+    return sklearn_log_loss(y_true, y_pred)


+def roc_auc_score(y_true, y_pred, multi_class='ovr'):
+    from sklearn.metrics import roc_auc_score as sklearn_roc_auc
+    y_true = convert_to_numpy(y_true)
+    y_pred = convert_to_numpy(y_pred)
+    if y_pred.shape[1] == 2:  # binary case
+        y_pred = y_pred[:, 1]
+    return sklearn_roc_auc(y_true, y_pred, multi_class=multi_class)


-def rmse_score(y, yp):
+def rmse_score(y_true, y_pred):
     return columnwise_score(
-        y, yp, lambda y1, y2: float(np.sqrt(np.mean((y1 - y2)**2))))
+        y_true, y_pred, lambda y1, y2: float(np.sqrt(np.mean((y1 - y2)**2))))


+def r2_score(y_true, y_pred):
+    from sklearn.metrics import r2_score as sklearn_r2_score
+    y_true = convert_to_numpy(y_true)
+    y_pred = convert_to_numpy(y_pred)
+    return sklearn_r2_score(y_true, y_pred)


+def davies_bouldin_score(y_true, y_pred):
+    from sklearn.metrics.cluster import davies_bouldin_score as sklearn_dbs
+    y_true = convert_to_numpy(y_true)
+    y_pred = convert_to_numpy(y_pred)
+    try:
+        res = sklearn_dbs(y_true, y_pred)
+    except ValueError:
+        res = "Number of labels is 1"
Review comment: for example, y_pred = [1, 1, 1, 1, 1, 1, 1]? Or?
+    return res
Review comment: I'm slightly concerned about such exception handling. My opinion is that our own configs shouldn't cause any exceptions.
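As a follow-up to the question above, here is a minimal, hypothetical reproduction of the case the except branch guards against: scikit-learn's davies_bouldin_score raises ValueError when all predicted labels fall into a single cluster. The feature matrix below is arbitrary and only serves as an illustration.

```python
import numpy as np
from sklearn.metrics.cluster import davies_bouldin_score

X = np.random.rand(7, 2)                   # arbitrary feature matrix, illustration only
labels = np.array([1, 1, 1, 1, 1, 1, 1])   # every sample assigned to the same cluster

try:
    davies_bouldin_score(X, labels)
except ValueError as e:
    # scikit-learn rejects a single cluster label; the benchmark helper
    # converts this case into the string "Number of labels is 1"
    print(e)
```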


 def convert_data(data, dtype, data_order, data_format):
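A note on the y_pred.shape[1] == 2 branch in the new roc_auc_score helper above: for binary targets, scikit-learn's roc_auc_score expects a 1-D array of scores for the positive class rather than the full predict_proba matrix, which is why the helper slices out the second column. A small sketch with made-up values:

```python
import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 1, 1, 0])
proba = np.array([[0.9, 0.1],   # predict_proba output: one column per class
                  [0.2, 0.8],
                  [0.3, 0.7],
                  [0.6, 0.4]])

# Binary case: pass only the positive-class column, as the helper does
print(roc_auc_score(y_true, proba[:, 1]))   # 1.0 for this toy example
```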
@@ -488,16 +515,21 @@ def gen_basic_dict(library, algorithm, stage, params, data, alg_instance=None,


 def print_output(library, algorithm, stages, params, functions,
-                 times, accuracy_type, accuracies, data, alg_instance=None,
+                 times, metric_type, metrics, data, alg_instance=None,
                  alg_params=None):
     if params.output_format == 'json':
         output = []
         for i in range(len(stages)):
             result = gen_basic_dict(library, algorithm, stages[i], params,
                                     data[i], alg_instance, alg_params)
             result.update({'time[s]': times[i]})
-            if accuracy_type is not None:
-                result.update({f'{accuracy_type}': accuracies[i]})
+            if metric_type is not None:
+                if isinstance(metric_type, str):
+                    result.update({f'{metric_type}': metrics[i]})
+                elif isinstance(metric_type, list):
+                    for ind, val in enumerate(metric_type):
+                        if metrics[ind][i] is not None:
+                            result.update({f'{val}': metrics[ind][i]})
             if hasattr(params, 'n_classes'):
                 result['input_data'].update({'classes': params.n_classes})
             if hasattr(params, 'n_clusters'):
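The updated print_output accepts either a single metric name or a list of names with per-stage values. A rough sketch of how one stage's JSON entry is assembled under the list branch; the metric names and numbers below are invented for illustration, not taken from the benchmarks:

```python
# Hypothetical inputs for stages ['training', 'prediction']
metric_type = ['accuracy[%]', 'log_loss']   # assumed metric names
metrics = [[98.2, 97.5],                    # accuracy per stage
           [0.11, None]]                    # log_loss reported for training only

result = {'time[s]': 1.23}
i = 0  # stage index ('training')
if isinstance(metric_type, str):
    result.update({f'{metric_type}': metrics[i]})
elif isinstance(metric_type, list):
    for ind, val in enumerate(metric_type):
        if metrics[ind][i] is not None:
            result.update({f'{val}': metrics[ind][i]})

print(result)   # {'time[s]': 1.23, 'accuracy[%]': 98.2, 'log_loss': 0.11}
```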
The second file in the diff is a JSON benchmark config; the change swaps the SVM kernel:

@@ -307,7 +307,7 @@
             }
         ],
         "nu": [0.25],
-        "kernel": ["sigmoid"]
+        "kernel": ["poly"]
     },
     {
         "algorithm": "svr",