added uncertainty stuff in semseg framework

Alexander Freytag, 12 years ago
parent commit 6ca2b34c8d

1 file changed, 348 insertions(+), 6 deletions(-)

semseg/SemSegCsurka.cpp  (+348 −6)

@@ -1480,12 +1480,38 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   }
 
 #ifdef UNCERTAINTY
-  vector<FloatImage> uncert;
+  std::vector<FloatImage> uncert;
+  std::vector<FloatImage> gpUncertainty;
+  std::vector<FloatImage> gpMean;    
+  std::vector<FloatImage> gpMeanRatio;  
+  std::vector<FloatImage> gpWeightAll;
+  std::vector<FloatImage> gpWeightRatio;
+//   std::vector<FloatImage> gpImpactAll;
+//   std::vector<FloatImage> gpImpactRatio;
+  
+  //pre-allocate storage -- one image per scale and method
   for(int s = 0; s < scalesize; s++)
   {
     uncert.push_back(FloatImage(xsize, ysize));
     uncert[s].set(0.0);
+    
+    gpUncertainty.push_back(FloatImage(xsize, ysize));
+    gpMean.push_back(FloatImage(xsize, ysize));
+    gpMeanRatio.push_back(FloatImage(xsize, ysize));
+    gpWeightAll.push_back(FloatImage(xsize, ysize));
+    gpWeightRatio.push_back(FloatImage(xsize, ysize));
+/*    gpImpactAll.push_back(FloatImage(xsize, ysize));    
+    gpImpactRatio.push_back(FloatImage(xsize, ysize));   */  
+   
+    gpUncertainty[s].set(0.0);
+    gpMean[s].set(0.0);
+    gpMeanRatio[s].set(0.0);
+    gpWeightAll[s].set(0.0);
+    gpWeightRatio[s].set(0.0);
+//     gpImpactAll[s].set(0.0); 
+//     gpImpactRatio[s].set(0.0);   
   }
+  
   ColorImage imgrgb ( xsize, ysize );
   std::string s;
   std::stringstream out;
@@ -1495,6 +1521,9 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   
   double maxu = -numeric_limits<double>::max();
   double minu = numeric_limits<double>::max();
+  
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+  
 #endif
 
   if ( classifier != NULL )
@@ -1508,6 +1537,13 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       {
         ClassificationResult r = classifier->classify ( pce[i].second );
 
+        #ifdef UNCERTAINTY
+        //we need this if we want to compute the GP-AL measures later on
+        double minMeanAbs ( numeric_limits<double>::max() );
+        double maxMeanAbs ( 0.0 );
+        double sndMaxMeanAbs ( 0.0 );
+        #endif
+        
         for ( int j = 0 ; j < fV.size(); j++ )
         {
           if ( useclass[j] == 0 )
@@ -1515,6 +1551,23 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 
           fV[j] += r.scores[j];
           preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+          
+         #ifdef UNCERTAINTY 
+          //check whether we found a class with a smaller abs mean than the current minimum
+         if (abs(r.scores[j]) < minMeanAbs)  
+           minMeanAbs = abs(r.scores[j]);
+         //check for larger abs mean as well
+         if (abs(r.scores[j]) > maxMeanAbs)
+         {
+           sndMaxMeanAbs = maxMeanAbs;
+           maxMeanAbs = abs(r.scores[j]);
+         }
+         // and also track the second highest abs mean over all classes
+         else if (abs(r.scores[j]) > sndMaxMeanAbs)
+         {
+           sndMaxMeanAbs = abs(r.scores[j]);
+         }
+         #endif          
         }
 
         /*if(r.uncertainty < 0.0)
@@ -1528,6 +1581,89 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
         uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
         maxu = std::max ( r.uncertainty, maxu );
         minu = std::min ( r.uncertainty, minu );
+        
+        
+        double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+        
+        //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+        // GP-UNCERTAINTY : |mean| / sqrt(variance + noise)
+        gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise)
+        
+        // compute the result when we take the lowest absolute mean value over all classes
+        gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+        
+        //look at the difference in the absolute mean values for the most plausible class
+        // and the second most plausible class
+        gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+        
+
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive;
+        std::vector<double> diffToNegative;
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < fV.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(r.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(r.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(r.scores[j] + 1);
+        }
+
+        //for every class, subtract its diffToNegative from the sum and add its
+        //diffToPositive; use this as the third term for that specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we would minimize over all classes after building the final score;
+        // however, the first and the second term do not depend on the choice of
+        // y*, so we can already minimize here
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;        
+        
+        //now look at the gap between the resulting weights for the most plausible
+        // and the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, estimate how this feature would affect the whole model (summarized by the
+        //weight vector alpha) if we used it as an additional training example
+        //TODO this would be really computationally demanding. Do we really want to do this?
+//         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;      
 #endif
       }
     }
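
For reference, the per-pixel logic of the uncertainty block above (and of the identical vector-classifier branch below) can be distilled into one standalone function. The following is a minimal sketch, assuming `scores` holds the per-class GP predictive means, `variance` corresponds to `r.uncertainty`, and `noise` to the GPHIK noise parameter; `GPMeasures` and `computeGPMeasures` are illustrative names, not part of the framework:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// Illustrative container for the five measures filled in above.
struct GPMeasures {
  double uncertainty; // |mean| / sqrt(variance + noise), cf. Kapoor et al.
  double mean;        // smallest absolute mean over all classes
  double meanRatio;   // gap between largest and second largest absolute mean
  double weightAll;   // third term minimized over all label choices, scaled
  double weightRatio; // gap between the two largest diff-to-(+1) terms, scaled
};

GPMeasures computeGPMeasures(const std::vector<double>& scores,
                             double variance, double noise) {
  const double firstTerm = 1.0 / std::sqrt(variance + noise);

  double minAbs = std::numeric_limits<double>::max();
  double maxAbs = 0.0, sndMaxAbs = 0.0;
  std::vector<double> diffToPos, diffToNeg;
  double diffToNegSum = 0.0;

  for (std::size_t j = 0; j < scores.size(); ++j) {
    const double a = std::fabs(scores[j]);
    if (a < minAbs) minAbs = a;
    if (a > maxAbs) { sndMaxAbs = maxAbs; maxAbs = a; }
    else if (a > sndMaxAbs) sndMaxAbs = a;
    diffToPos.push_back(std::fabs(scores[j] - 1.0)); // distance to label +1
    diffToNeg.push_back(std::fabs(scores[j] + 1.0)); // distance to label -1
    diffToNegSum += std::fabs(scores[j] + 1.0);
  }

  // third term: minimized over the hypothetical label y*, since the first
  // term does not depend on that choice
  double thirdTerm = std::numeric_limits<double>::max();
  double largest = 0.0, sndLargest = 0.0;
  for (std::size_t k = 0; k < diffToPos.size(); ++k) {
    thirdTerm = std::min(thirdTerm, diffToPos[k] + (diffToNegSum - diffToNeg[k]));
    if (diffToPos[k] > largest) { sndLargest = largest; largest = diffToPos[k]; }
    else if (diffToPos[k] > sndLargest) sndLargest = diffToPos[k];
  }

  GPMeasures m;
  m.uncertainty = maxAbs * firstTerm;
  m.mean        = minAbs;
  m.meanRatio   = maxAbs - sndMaxAbs;
  m.weightAll   = thirdTerm * firstTerm;
  m.weightRatio = (largest - sndLargest) * firstTerm;
  return m;
}
```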
@@ -1541,17 +1677,125 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       for ( int i = s; i < ( int ) pce.size(); i += scalesize )
       {
         ClassificationResult r = vclassifier->classify ( * ( pce[i].second.vec ) );
+        
+        #ifdef UNCERTAINTY
+        //we need this if we want to compute the GP-AL measures later on
+        double minMeanAbs ( numeric_limits<double>::max() );
+        double maxMeanAbs ( 0.0 );
+        double sndMaxMeanAbs ( 0.0 );
+        #endif        
+        
         for ( int j = 0 ; j < ( int ) fV.size(); j++ )
         {
           if ( useclass[j] == 0 )
             continue;
           fV[j] += r.scores[j];
           preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+          
+         #ifdef UNCERTAINTY 
+          //check whether we found a class with a smaller abs mean than the current minimum
+         if (abs(r.scores[j]) < minMeanAbs)  
+           minMeanAbs = abs(r.scores[j]);
+         //check for larger abs mean as well
+         if (abs(r.scores[j]) > maxMeanAbs)
+         {
+           sndMaxMeanAbs = maxMeanAbs;
+           maxMeanAbs = abs(r.scores[j]);
+         }
+         // and also track the second highest abs mean over all classes
+         else if (abs(r.scores[j]) > sndMaxMeanAbs)
+         {
+           sndMaxMeanAbs = abs(r.scores[j]);
+         }
+         #endif            
         }
 #ifdef UNCERTAINTY
         uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
         maxu = std::max ( r.uncertainty, maxu );
         minu = std::min ( r.uncertainty, minu );
+        
+        
+        double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+        
+        //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+        // GP-UNCERTAINTY : |mean| / sqrt(variance + noise)
+        gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise)
+        
+        // compute the result when we take the lowest absolute mean value over all classes
+        gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+        
+        //look at the difference in the absolute mean values for the most plausible class
+        // and the second most plausible class
+        gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+        
+
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive;
+        std::vector<double> diffToNegative;
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < fV.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(r.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(r.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(r.scores[j] + 1);
+        }
+
+        //for every class, subtract its diffToNegative from the sum and add its
+        //diffToPositive; use this as the third term for that specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we would minimize over all classes after building the final score;
+        // however, the first and the second term do not depend on the choice of
+        // y*, so we can already minimize here
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;        
+        
+        //now look at the gap between the resulting weights for the most plausible
+        // and the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, estimate how this feature would affect the whole model (summarized by the
+        //weight vector alpha) if we used it as an additional training example
+        //TODO this would be really computationally demanding. Do we really want to do this?
+//         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;      
 #endif
       }
     }
@@ -1559,9 +1803,32 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 
 #ifdef UNCERTAINTY
   cout << "maxvdirect: " << maxu << " minvdirect: " << minu << endl;
+  //pre-allocate the image for the filtering later on
   FloatImage gaussUncert ( xsize, ysize );
+  
+  //just store the first scale
   ICETools::convertToRGB ( uncert[0], imgrgb );
   imgrgb.write ( out.str() + "rough.png" );
+  
+  //pre-allocate memory for filtering of scales
+  FloatImage gaussGPUncertainty ( xsize, ysize );
+  FloatImage gaussGPMean ( xsize, ysize );
+  FloatImage gaussGPMeanRatio( xsize, ysize );
+  FloatImage gaussGPWeightAll ( xsize, ysize );
+  FloatImage gaussGPWeightRatio ( xsize, ysize );
+   
+  //just store the first scale for every method
+  ICETools::convertToRGB ( gpUncertainty[0], imgrgb );
+  imgrgb.write ( out.str() + "gpUncertainty.png" );
+  ICETools::convertToRGB ( gpMean[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMean.png" );
+  ICETools::convertToRGB ( gpMeanRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatio.png" );
+  ICETools::convertToRGB ( gpWeightAll[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAll.png" );
+  ICETools::convertToRGB ( gpWeightRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatio.png" );  
+  
 #endif
 
   vector<double> scalesVec;
@@ -1659,6 +1926,19 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 #ifdef UNCERTAINTY
     filterGaussSigmaApproximate<float, float, float> ( uncert[s], sigma, &gaussUncert );
     uncert[s] = gaussUncert;
+    
+    //apply the Gaussian filtering to every scale of every method
+    filterGaussSigmaApproximate<float, float, float> ( gpUncertainty[s], sigma, &gaussGPUncertainty );
+    filterGaussSigmaApproximate<float, float, float> ( gpMean[s], sigma, &gaussGPMean );
+    filterGaussSigmaApproximate<float, float, float> ( gpMeanRatio[s], sigma, &gaussGPMeanRatio );
+    filterGaussSigmaApproximate<float, float, float> ( gpWeightAll[s], sigma, &gaussGPWeightAll );
+    filterGaussSigmaApproximate<float, float, float> ( gpWeightRatio[s], sigma, &gaussGPWeightRatio );
+    
+    gpUncertainty[s] = gaussGPUncertainty; 
+    gpMean[s] = gaussGPMean; 
+    gpMeanRatio[s] = gaussGPMeanRatio; 
+    gpWeightAll[s] = gaussGPWeightAll;
+    gpWeightRatio[s] = gaussGPWeightRatio;   
 #endif
   }
 
@@ -1691,8 +1971,20 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       for ( int s = 0; s < ( int ) scalesize; s++ )
       {
         gaussUncert(x,y) += uncert[s](x,y);
+        //and for the other methods as well
+        gaussGPUncertainty(x,y) += gpUncertainty[s](x,y);
+        gaussGPMean(x,y) += gpMean[s](x,y);
+        gaussGPMeanRatio(x,y) += gpMeanRatio[s](x,y);
+        gaussGPWeightAll(x,y) += gpWeightAll[s](x,y);
+        gaussGPWeightRatio(x,y) += gpWeightRatio[s](x,y);
       }
       gaussUncert(x,y)/=scalesize;
+      //and for the other methods as well
+      gaussGPUncertainty(x,y)/=scalesize;
+      gaussGPMean(x,y)/=scalesize;
+      gaussGPMeanRatio(x,y)/=scalesize;
+      gaussGPWeightAll(x,y)/=scalesize;
+      gaussGPWeightRatio(x,y)/=scalesize;      
     }
   }
 
@@ -1727,7 +2019,18 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   gaussUncert(0,1) = 0.04;
   ICETools::convertToRGB ( gaussUncert, imgrgb );
   imgrgb.write ( out.str() + "filtered.png" );
-
+  
+  ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertaintyFiltered.png" );
+  ICETools::convertToRGB ( gaussGPMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanFiltered.png" );
+  ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatioFiltered.png" );
+  ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAllFiltered.png" );
+  ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatioFiltered.png" );  
+  
 #endif
 
 #undef VISSEMSEG
@@ -1793,7 +2096,13 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     vector<vector <double> > regionprob;
 
 #ifdef UNCERTAINTY
-    vector<double> regionUncert;
+    std::vector<double> regionUncert;
+    
+    std::vector<double> regionGPUncertainty;
+    std::vector<double> regionGPMean;
+    std::vector<double> regionGPMeanRatio;
+    std::vector<double> regionGPWeightAll;
+    std::vector<double> regionGPWeightRatio;    
 #endif
 
    // initialize the probabilities for every region
@@ -1808,6 +2117,12 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       Regionen.push_back ( pair<int, Example> ( 0, Example() ) );
 #ifdef UNCERTAINTY
       regionUncert.push_back ( 0.0 );
+      
+      regionGPUncertainty.push_back ( 0.0 );
+      regionGPMean.push_back ( 0.0 );
+      regionGPMeanRatio.push_back ( 0.0 );
+      regionGPWeightAll.push_back ( 0.0 );
+      regionGPWeightRatio.push_back ( 0.0 );
 #endif
     }
 
@@ -1827,11 +2142,16 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
         }
 #ifdef UNCERTAINTY
         regionUncert[pos] += gaussUncert ( x, y );
+        
+        regionGPUncertainty[pos] += gaussGPUncertainty ( x, y );
+        regionGPMean[pos] += gaussGPMean ( x, y );
+        regionGPMeanRatio[pos] += gaussGPMeanRatio ( x, y );
+        regionGPWeightAll[pos] += gaussGPWeightAll ( x, y );
+        regionGPWeightRatio[pos] += gaussGPWeightRatio ( x, y );
 #endif
       }
     }
 
-
     /*
     cout << "regions: " << regionsize << endl;
     cout << "outfeats: " << endl;
@@ -1875,9 +2195,14 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       Regionen[i].first = maxpos;
 #ifdef UNCERTAINTY
       regionUncert[i] /= Regionen[i].second.weight;
+      
+      regionGPUncertainty[i] /= Regionen[i].second.weight;
+      regionGPMean[i] /= Regionen[i].second.weight;
+      regionGPMeanRatio[i] /= Regionen[i].second.weight;
+      regionGPWeightAll[i] /= Regionen[i].second.weight;
+      regionGPWeightRatio[i] /= Regionen[i].second.weight;
 #endif
     }
-
    // label the pixels of every region
     for ( int y = 0; y < ( int ) mask.cols(); y++ )
     {
@@ -1887,9 +2212,15 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
         segresult.setPixel ( x, y, Regionen[pos].first );
 #ifdef UNCERTAINTY
         gaussUncert ( x, y ) = regionUncert[pos];
+        
+        gaussGPUncertainty ( x, y ) = regionGPUncertainty[pos];
+        gaussGPMean ( x, y ) = regionGPMean[pos];
+        gaussGPMeanRatio ( x, y ) = regionGPMeanRatio[pos];
+        gaussGPWeightAll ( x, y ) = regionGPWeightAll[pos];
+        gaussGPWeightRatio ( x, y ) = regionGPWeightRatio[pos];        
 #endif
       }
-    }
+    }   
 #ifdef UNCERTAINTY
     maxu = -numeric_limits<float>::max();
     minu = numeric_limits<float>::max();
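
The region handling above is a masked mean: each pixel's value is accumulated into its region, the sum is divided by the region weight (its pixel count), and the mean is written back to every pixel of the region. A compact sketch of that pooling with an illustrative flat layout, where `regionOf[p]` gives the region index of pixel `p`:

```cpp
#include <cstddef>
#include <vector>

// Masked region-mean pooling: accumulate per region, normalize by region
// size, then write each region's mean back to all of its pixels.
void regionMeanPool(std::vector<float>& values,
                    const std::vector<int>& regionOf, int numRegions) {
  std::vector<double> sum(numRegions, 0.0);
  std::vector<int> count(numRegions, 0);
  for (std::size_t p = 0; p < values.size(); ++p) {
    sum[regionOf[p]] += values[p];
    count[regionOf[p]] += 1;
  }
  for (std::size_t p = 0; p < values.size(); ++p)
    if (count[regionOf[p]] > 0)
      values[p] = static_cast<float>(sum[regionOf[p]] / count[regionOf[p]]);
}
```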
@@ -1908,6 +2239,17 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 //    uncert(0,1) = 0;
     ICETools::convertToRGB ( gaussUncert, imgrgb );
     imgrgb.write ( out.str() + "region.png" );
+    
+    ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+    imgrgb.write ( out.str() + "gpUncertaintyRegion.png" );
+    ICETools::convertToRGB ( gaussGPMean, imgrgb );
+    imgrgb.write ( out.str() + "gpMeanRegion.png" );
+    ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+    imgrgb.write ( out.str() + "gpMeanRatioRegion.png" );
+    ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+    imgrgb.write ( out.str() + "gpWeightAllRegion.png" );
+    ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+    imgrgb.write ( out.str() + "gpWeightRatioRegion.png" );
 #endif
 
 #undef WRITEREGIONS
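
Note that everything this commit adds is guarded by `#ifdef UNCERTAINTY`, so the new measures and image dumps only exist in builds that define that symbol; the diff itself does not show where `UNCERTAINTY` is defined, so it presumably comes from the build configuration or an earlier `#define` in the file.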