[FIX] Predictions: Handle discrete target with no values #4066

Merged 1 commit on Oct 1, 2019

6 changes: 3 additions & 3 deletions Orange/base.py
@@ -259,13 +259,13 @@ def backmap_value(self, value, mapped_probs, n_values, backmappers):

         value = backmapper(value)
         nans = np.isnan(value)
-        if not np.any(nans):
+        if not np.any(nans) or n_values[0] < 2:
             return value
         if mapped_probs is not None:
             value[nans] = np.argmax(mapped_probs[nans], axis=1)
         else:
-            value[nans] = np.RandomState(0).choice(
-                backmapper(np.arange(0, n_values[0] - 1))
+            value[nans] = np.random.RandomState(0).choice(
+                backmapper(np.arange(0, n_values[0] - 1)),
                 (np.sum(nans), ))
         return value
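
The fix above is easy to reproduce with plain NumPy, independent of Orange: np.RandomState is not a top-level NumPy name (the class lives in np.random), so the old fallback branch raised AttributeError as soon as it ran, and the new n_values[0] < 2 guard returns early when the discrete target declares fewer than two values. A minimal sketch of both points (plain NumPy with stand-in values, not the Orange code itself):

import numpy as np

# np.RandomState does not exist at numpy's top level; the class is
# np.random.RandomState, so the old call raised AttributeError.
rng = np.random.RandomState(0)

# Stand-ins for what backmap_value sees when the target declares no values:
# every prediction is NaN and n_values[0] is 0.
value = np.full(3, np.nan)
n_values = [0]

# The new guard: with fewer than two declared values there is nothing to
# sample from, so the (all-NaN) predictions are returned unchanged.
if not np.any(np.isnan(value)) or n_values[0] < 2:
    print("early return:", value)  # early return: [nan nan nan]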

8 changes: 6 additions & 2 deletions Orange/widgets/evaluate/owconfusionmatrix.py
@@ -118,6 +118,7 @@ class Outputs:
     class Error(widget.OWWidget.Error):
         no_regression = Msg("Confusion Matrix cannot show regression results.")
         invalid_values = Msg("Evaluation Results input contains invalid values")
+        empty_input = widget.Msg("Empty result on input. Nothing to display.")

     def __init__(self):
         super().__init__()
@@ -245,11 +246,14 @@ def set_results(self, results):
         if results is not None and results.data is not None:
             data = results.data[results.row_indices]

+        self.Error.no_regression.clear()
+        self.Error.empty_input.clear()
         if data is not None and not data.domain.has_discrete_class:
             self.Error.no_regression()
             data = results = None
-        else:
-            self.Error.no_regression.clear()
+        elif results is not None and not results.actual.size:
+            self.Error.empty_input()
+            data = results = None

         nan_values = False
         if results is not None:
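
The new empty_input branch relies on results.actual being a NumPy array, which the surrounding code already assumes: when no rows reach the widget, actual.size is 0 and the error is shown instead of building a matrix. A rough stand-alone illustration (hypothetical stand-in array, not the widget code):

import numpy as np

# Hypothetical stand-in for results.actual when the evaluation produced no rows.
actual = np.empty((0,))

if not actual.size:
    print("empty result on input -> show Error.empty_input, output nothing")
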
4 changes: 0 additions & 4 deletions Orange/widgets/evaluate/owrocanalysis.py
@@ -303,10 +303,6 @@ class OWROCAnalysis(widget.OWWidget):
     class Inputs:
         evaluation_results = Input("Evaluation Results", Orange.evaluation.Results)

-    class Warning(widget.OWWidget.Warning):
-        empty_results = widget.Msg(
-            "Empty results on input. There is nothing to display.")
-
     target_index = settings.Setting(0)
     selected_classifiers = []

38 changes: 37 additions & 1 deletion Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -7,8 +7,12 @@
 from Orange.data.io import TabReader
 from Orange.widgets.tests.base import WidgetTest
 from Orange.widgets.evaluate.owpredictions import OWPredictions
+from Orange.widgets.evaluate.owcalibrationplot import OWCalibrationPlot
+from Orange.widgets.evaluate.owconfusionmatrix import OWConfusionMatrix
+from Orange.widgets.evaluate.owliftcurve import OWLiftCurve
+from Orange.widgets.evaluate.owrocanalysis import OWROCAnalysis

-from Orange.data import Table, Domain
+from Orange.data import Table, Domain, DiscreteVariable
 from Orange.modelling import ConstantLearner, TreeLearner
 from Orange.evaluation import Results
 from Orange.widgets.tests.utils import excepthook_catch
@@ -49,6 +53,38 @@ def test_nan_target_input(self):
         evres = self.get_output(self.widget.Outputs.evaluation_results)
         self.assertEqual(len(evres.data), 0)

+    def test_no_values_target(self):
+        train = Table("titanic")
+        model = ConstantLearner()(train)
+        self.send_signal(self.widget.Inputs.predictors, model)
+        domain = Domain([DiscreteVariable("status", values=["first", "third"]),
+                         DiscreteVariable("age", values=["adult", "child"]),
+                         DiscreteVariable("sex", values=["female", "male"])],
+                        [DiscreteVariable("survived", values=[])])
+        test = Table(domain, np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
+                     np.full((3, 1), np.nan))
+        self.send_signal(self.widget.Inputs.data, test)
+        pred = self.get_output(self.widget.Outputs.predictions)
+        self.assertEqual(len(pred), len(test))
+
+        results = self.get_output(self.widget.Outputs.evaluation_results)
+
+        cm_widget = self.create_widget(OWConfusionMatrix)
+        self.send_signal(cm_widget.Inputs.evaluation_results, results,
+                         widget=cm_widget)
+
+        ra_widget = self.create_widget(OWROCAnalysis)
+        self.send_signal(ra_widget.Inputs.evaluation_results, results,
+                         widget=ra_widget)
+
+        lc_widget = self.create_widget(OWLiftCurve)
+        self.send_signal(lc_widget.Inputs.evaluation_results, results,
+                         widget=lc_widget)
+
+        cp_widget = self.create_widget(OWCalibrationPlot)
+        self.send_signal(cp_widget.Inputs.evaluation_results, results,
+                         widget=cp_widget)
+
     def test_mismatching_targets(self):
         warning = self.widget.Warning

3 changes: 3 additions & 0 deletions Orange/widgets/evaluate/utils.py
@@ -32,6 +32,9 @@ def anynan(a):
     elif not results.data.domain.has_discrete_class:
         error_group.invalid_results(
             "Discrete outcome variable is required")
+    elif not results.actual.size:
+        error_group.invalid_results(
+            "Empty result on input. Nothing to display.")
     elif check_nan and (anynan(results.actual) or
                         anynan(results.predicted) or
                         (results.probabilities is not None and
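
The position of the new branch matters: any() over an empty array is False, so an empty result would pass the NaN check below it and only surface as a problem further downstream. Checking results.actual.size first reports it with an explicit message. A small sketch of that ordering argument (simplified anynan for illustration, not the Orange helper itself):

import numpy as np

def anynan(a):
    # Simplified stand-in for the helper named in the hunk header above.
    return bool(np.isnan(a).any())

actual = np.empty((0,))

print(anynan(actual))   # False -- an empty array contains no NaNs
print(not actual.size)  # True  -- the new check flags it as empty instead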