Skip to content Skip to sidebar Skip to footer

Custom Tensorflow Metric: True Positive Rate At Given False Positive Rate

I have a binary classification problem with categories background (bg) = 0, signal (sig) = 1, for which I am training NNs. For monitoring purposes, I am trying to implement a custom metric: the true positive rate at a given false positive rate.

Solution 1:

There's a metric for sensitivity at specificity, which I believe is equivalent (specificity is one minus FPR).

Solution 2:

You can implement your own metric, and here is an example for the false positive rate:

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.metrics_impl import _aggregate_across_towers
from tensorflow.python.ops.metrics_impl import true_negatives
from tensorflow.python.ops.metrics_impl import false_positives
from tensorflow.python.ops.metrics_impl import _remove_squeezable_dimensions

def false_positive_rate(labels,
                        predictions,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the streaming false positive rate, FP / (FP + TN).

  Mirrors the structure of `tf.metrics.recall` (TF 1.x `metrics_impl`):
  it accumulates false-positive and true-negative counts in local
  variables and returns both the current value and an update op.

  Args:
    labels: Ground-truth tensor; cast to bool (nonzero -> positive).
    predictions: Predicted tensor; cast to bool (nonzero -> positive).
    weights: Optional weights, broadcastable to `labels`' shape.
    metrics_collections: Optional collections to add the value tensor to.
    updates_collections: Optional collections to add the update op to.
    name: Optional variable-scope name; defaults to 'false_alarm'.

  Returns:
    A `(false_positive_rate, update_op)` tuple. Evaluating `update_op`
    accumulates counts from the current batch and returns the FPR so far.

  Raises:
    RuntimeError: If eager execution is enabled (streaming metrics
      require graph-mode local variables).
  """
  if context.executing_eagerly():
    # Fixed message: the original was a garbled copy of the recall error.
    raise RuntimeError('false_positive_rate is not supported when '
                       'eager execution is enabled.')

  with variable_scope.variable_scope(name, 'false_alarm',
                                     (predictions, labels, weights)):
    # Align shapes and coerce both tensors to booleans, as the
    # confusion-matrix helpers expect.
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    # Streaming FP and TN counters; collections are handled below for the
    # combined metric, so the sub-metrics register none themselves.
    false_p, false_positives_update_op = false_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    true_n, true_negatives_update_op = true_negatives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_false_positive_rate(true_n, false_p, name):
      # Guard against 0/0 when no negatives have been seen yet.
      return array_ops.where(
          math_ops.greater(true_n + false_p, 0),
          math_ops.div(false_p, true_n + false_p), 0, name)

    def once_across_towers(_, true_n, false_p):
      return compute_false_positive_rate(true_n, false_p, 'value')

    # Aggregate the counters across replicas before computing the ratio.
    false_positive_rate = _aggregate_across_towers(
        metrics_collections, once_across_towers, true_n, false_p)

    update_op = compute_false_positive_rate(true_negatives_update_op,
                               false_positives_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return false_positive_rate, update_op

You can adapt the code to the true positive rate.

Post a Comment for "Custom Tensorflow Metric: True Positive Rate At Given False Positive Rate"