From 24e2e0d3f9bf18f23a995cee66d4685d07b8e440 Mon Sep 17 00:00:00 2001
From: codingforfun
Date: Tue, 27 Mar 2018 15:39:36 +0200
Subject: [PATCH] #11143 [FIX] Normalize node risk with sample weight sum

In case of regression trees, node risk is computed as the sum of squared
errors. To get a meaningful value to compare against, it needs to be
normalized by the number of samples in the node (or, more generally, by
the sum of sample weights in the node). Otherwise the sum of squared
errors depends heavily on the number of samples in the node, and the
comparison with the `regressionAccuracy` parameter is not very meaningful.

After normalization, `node_risk` is in fact the sample variance over all
samples in the node, which makes much more sense and seems to be what was
originally intended by the code, given that node risk is later used as a
split termination criterion via

```
sqrt(node.node_risk) < params.getRegressionAccuracy()
```

---
 modules/ml/src/tree.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp
index c02b7440e7..4b3079b3d1 100644
--- a/modules/ml/src/tree.cpp
+++ b/modules/ml/src/tree.cpp
@@ -632,6 +632,7 @@ void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
         }
 
         node->node_risk = sum2 - (sum/sumw)*sum;
+        node->node_risk /= sumw;
         node->value = sum/sumw;
     }
 }
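
For context, and not part of the patch itself: a minimal standalone sketch of
the statistic the patched code computes. The names `sum`, `sum2`, and `sumw`
match the diff; the accumulation loop, the response vector `y`, the weight
vector `w`, and the helper function itself are assumptions, since they lie
outside the hunk.

```
#include <vector>

// Hypothetical helper (not in OpenCV): computes the node risk the same
// way DTreesImpl::calcValue does after the patch.
double weightedNodeRisk(const std::vector<double>& y,  // per-sample responses
                        const std::vector<double>& w)  // per-sample weights
{
    double sum = 0, sum2 = 0, sumw = 0;
    for (std::size_t i = 0; i < y.size(); i++)
    {
        sum  += w[i] * y[i];         // weighted sum of responses
        sum2 += w[i] * y[i] * y[i];  // weighted sum of squared responses
        sumw += w[i];                // sum of sample weights
    }
    // Weighted sum of squared errors about the mean:
    //   sum2 - (sum/sumw)*sum == sum_i w_i * (y_i - mean)^2, mean = sum/sumw
    double risk = sum2 - (sum / sumw) * sum;
    // The patched line: dividing by the weight sum turns the SSE into the
    // weighted sample variance, so sqrt(risk) is on the same scale as the
    // regressionAccuracy threshold.
    risk /= sumw;
    return risk;
}
```

With uniform weights (all `w[i] == 1`), `sumw` is just the sample count, so
the patch reduces the risk from a raw SSE to the ordinary variance, making
the `sqrt(node.node_risk) < params.getRegressionAccuracy()` check independent
of node size.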