In findConvexContours(), reuse an image buffer from frame to frame.
Add a keystroke command (Q) to print the timing measurements (including min & max) and then quit.

git-svn-id: https://robotics.mvla.net/svn/frc971/2013/trunk/src@4133 f308d9b7-e957-4cde-b6ac-9a88185e7312
diff --git a/971CV/src/edu/wpi/first/wpijavacv/DaisyExtensions.java b/971CV/src/edu/wpi/first/wpijavacv/DaisyExtensions.java
index 31647fb..41dccb7 100644
--- a/971CV/src/edu/wpi/first/wpijavacv/DaisyExtensions.java
+++ b/971CV/src/edu/wpi/first/wpijavacv/DaisyExtensions.java
@@ -18,6 +18,7 @@
  */

 public class DaisyExtensions {

     private final CvMemStorage storage = CvMemStorage.create();

+    private IplImage contourImage;

 

     public DaisyExtensions() {

     }

@@ -59,14 +60,20 @@
     public WPIContour[] findConvexContours(WPIBinaryImage image) {

         image.validateDisposed();

 

-        // TODO(jerry): Reuse tempImage from frame to frame.

-        IplImage tempImage = IplImage.create(image.image.cvSize(),

-        	image.image.depth(), 1);

+        if (contourImage == null

+                || contourImage.cvSize().width() != image.getWidth()

+                || contourImage.cvSize().height() != image.getHeight()) {

+            if (contourImage != null) {

+                contourImage.release();

+            }

+            contourImage = IplImage.create(image.image.cvSize(),

+                    image.image.depth(), 1);

+        }

 

-        opencv_core.cvCopy(image.image, tempImage);

+        opencv_core.cvCopy(image.image, contourImage);

 

         CvSeq contours = new CvSeq();

-        opencv_imgproc.cvFindContours(tempImage, storage, contours, 256,

+        opencv_imgproc.cvFindContours(contourImage, storage, contours, 256,

         	opencv_imgproc.CV_RETR_LIST,

         	opencv_imgproc.CV_CHAIN_APPROX_TC89_KCOS);

         ArrayList<WPIContour> results = new ArrayList<WPIContour>();

@@ -79,7 +86,6 @@
             contours = contours.h_next();

         }

 

-        tempImage.release();

         WPIContour[] array = new WPIContour[results.size()];

         return results.toArray(array);

     }

diff --git a/971CV/src/org/frc971/DebugCanvas.java b/971CV/src/org/frc971/DebugCanvas.java
index 3be9a45..484620c 100644
--- a/971CV/src/org/frc971/DebugCanvas.java
+++ b/971CV/src/org/frc971/DebugCanvas.java
@@ -4,7 +4,7 @@
 import com.googlecode.javacv.cpp.opencv_core.IplImage;

 

 public class DebugCanvas {

-    public static boolean show = true;

+    public boolean show;

     private CanvasFrame canvasFrame;

     private String name;

 

diff --git a/971CV/src/org/frc971/Recognizer.java b/971CV/src/org/frc971/Recognizer.java
index c8818b7..6ab6455 100644
--- a/971CV/src/org/frc971/Recognizer.java
+++ b/971CV/src/org/frc971/Recognizer.java
@@ -14,12 +14,15 @@
      * Sets the HSV filter to allow H in [minHue .. maxHue], S >= minSat,

      * V >= minVal.

      */

-    public void setHSVRange(int minHue, int maxHue, int minSat, int minVal);

+    void setHSVRange(int minHue, int maxHue, int minSat, int minVal);

 

-    public int getHueMin();

-    public int getHueMax();

-    public int getSatMin();

-    public int getValMin();

+    int getHueMin();

+    int getHueMax();

+    int getSatMin();

+    int getValMin();

+

+    /** Enables/disables windows to view intermediate stages, for tuning. */

+    void showIntermediateStages(boolean enable);

 

     /**

      * Processes a camera image, returning an image to display for targeting

diff --git a/971CV/src/org/frc971/Recognizer2013.java b/971CV/src/org/frc971/Recognizer2013.java
index ff34fda..d2de431 100644
--- a/971CV/src/org/frc971/Recognizer2013.java
+++ b/971CV/src/org/frc971/Recognizer2013.java
@@ -94,6 +94,12 @@
     public int getValMin() { return min1Val - 1; }

 

     @Override

+    public void showIntermediateStages(boolean enable) {

+        thresholdedCanvas.show = enable;

+        morphedCanvas.show = enable;

+    }

+

+    @Override

     public WPIImage processImage(WPIColorImage cameraImage) {

         // (Re)allocate the intermediate images if the input is a different

         // size than the previous image.

diff --git a/971CV/src/org/frc971/VisionTuner.java b/971CV/src/org/frc971/VisionTuner.java
index 663b2f1..c231743 100644
--- a/971CV/src/org/frc971/VisionTuner.java
+++ b/971CV/src/org/frc971/VisionTuner.java
@@ -35,6 +35,9 @@
 /**

  * FRC 2013 vision-target recognizer tuner app.

  *

+ * <p>

+ * See {@link #processEvents()} for the keystroke commands.

+ *

  * @author jerry

  */

 public class VisionTuner {

@@ -50,15 +53,16 @@
     private final JSlider satMinSlider = new JSlider();

     private final JSlider valMinSlider = new JSlider();

 

-    private static int totalFrames;

-    private static double totalMsec;

-    //    private static double minMsec = Double.MAX_VALUE;

-    //    private static double maxMsec;

+    private int totalFrames;

+    private double totalMsec;

+    private double minMsec = Double.MAX_VALUE;

+    private double maxMsec;

 

     public VisionTuner(String[] imageFilenames) {

         cameraFrame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);

 

         loadTestImages(imageFilenames);

+        recognizer.showIntermediateStages(true);

 

         cameraFrame.getContentPane().add(panel, BorderLayout.SOUTH);

         panel.setLayout(new GridLayout(0, 2, 0, 0));

@@ -145,9 +149,9 @@
         double milliseconds = (endTime - startTime) / 1e6;

         ++totalFrames;

         totalMsec += milliseconds;

-        //        minMsec = Math.min(minMsec, milliseconds);

-        //        maxMsec = Math.max(maxMsec, milliseconds);

-        System.out.format("Processing took %.2f ms, %.2f fps, %.2f avg%n",

+        minMsec = Math.min(minMsec, milliseconds);

+        maxMsec = Math.max(maxMsec, milliseconds);

+        System.out.format("The recognizer took %.2f ms, %.2f fps, %.2f avg%n",

                 milliseconds, 1000 / milliseconds,

                 1000 * totalFrames / totalMsec);

     }

@@ -170,12 +174,19 @@
         KeyEvent e = cameraFrame.waitKey();

 

         switch (e.getKeyCode()) {

-        case KeyEvent.VK_LEFT:

+        case KeyEvent.VK_LEFT: // left arrow key: go to previous image

             previousImage();

             break;

-        case KeyEvent.VK_RIGHT:

+        case KeyEvent.VK_RIGHT: // right arrow key: go to next image

             nextImage();

             break;

+        case KeyEvent.VK_Q: // Q: print time measurements then quit

+            System.out.format("The recognizer took %.2f ms avg, %.2f min,"

+                    + " %.2f max, %.2f fps avg%n",

+                    totalMsec / totalFrames,

+                    minMsec, maxMsec,

+                    1000 * totalFrames / totalMsec);

+            System.exit(0);

         }

     }