Merge "Make index.html actually an index."
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
index ddc38ac..c186711 100755
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -72,6 +72,8 @@
         self.inValue = None
         self.startSet = False
 
+        self.module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
+
     """set extents on images"""
 
     def reinit_extents(self):
@@ -320,35 +322,45 @@
             self.spline_edit = self.points.updates_for_mouse_move(
                 self.index_of_edit, self.spline_edit, self.x, self.y, difs)
 
+    def export_json(self, file_name):
+        self.path_to_export = os.path.join(self.module_path,
+                                           "spline_jsons/" + file_name)
+        if file_name[-5:] != ".json":
+            print("Error: Filename doesn't end in .json")
+        else:
+            # Will export to json file
+            self.mode = Mode.kEditing
+            exportList = [l.tolist() for l in self.points.getSplines()]
+            with open(self.path_to_export, mode='w') as points_file:
+                json.dump(exportList, points_file)
+
+    def import_json(self, file_name):
+        self.path_to_export = os.path.join(self.module_path,
+                                           "spline_jsons/" + file_name)
+        if file_name[-5:] != ".json":
+            print("Error: Filename doesn't end in .json")
+        else:
+            # import from json file
+            self.mode = Mode.kEditing
+            self.points.resetPoints()
+            self.points.resetSplines()
+            print("LOADING SPLINES FROM " + file_name)  # Load takes a few seconds
+            with open(self.path_to_export) as points_file:
+                self.points.setUpSplines(json.load(points_file))
+
+            self.points.update_lib_spline()
+            print("SPLINES LOADED")
+
     def do_key_press(self, event, file_name):
         keyval = Gdk.keyval_to_lower(event.keyval)
-        module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
-        self.path_to_export = os.path.join(module_path,
-                                           "spline_jsons/" + file_name)
         if keyval == Gdk.KEY_q:
             print("Found q key and exiting.")
             quit_main_loop()
-        file_name_end = file_name[-5:]
-        if file_name_end != ".json":
-            print("Error: Filename doesn't end in .json")
-        else:
-            if keyval == Gdk.KEY_e:
-                # Will export to json file
-                self.mode = Mode.kEditing
-                print('out to: ', self.path_to_export)
-                exportList = [l.tolist() for l in self.points.getSplines()]
-                with open(self.path_to_export, mode='w') as points_file:
-                    json.dump(exportList, points_file)
+        if keyval == Gdk.KEY_e:
+            self.export_json(file_name)
 
-            if keyval == Gdk.KEY_i:
-                # import from json file
-                self.mode = Mode.kEditing
-                self.points.resetPoints()
-                self.points.resetSplines()
-                with open(self.path_to_export) as points_file:
-                    self.points.setUpSplines(json.load(points_file))
-
-                self.points.update_lib_spline()
+        if keyval == Gdk.KEY_i:
+            self.import_json(file_name)
 
         if keyval == Gdk.KEY_p:
             self.mode = Mode.kPlacing
diff --git a/frc971/control_loops/python/spline_graph.py b/frc971/control_loops/python/spline_graph.py
index 94ee683..7885ec1 100755
--- a/frc971/control_loops/python/spline_graph.py
+++ b/frc971/control_loops/python/spline_graph.py
@@ -42,10 +42,13 @@
     def configure(self, event):
         self.drawing_area.window_shape = (event.width, event.height)
 
-    # handle submitting a constraint
-    def on_submit_click(self, widget):
-        self.drawing_area.inConstraint = int(self.constraint_box.get_text())
-        self.drawing_area.inValue = int(self.value_box.get_text())
+    def output_json_clicked(self, button):
+        print("OUTPUT JSON CLICKED")
+        self.drawing_area.export_json(self.file_name_box.get_text())
+
+    def input_json_clicked(self, button):
+        print("INPUT JSON CLICKED")
+        self.drawing_area.import_json(self.file_name_box.get_text())
 
     def __init__(self):
         Gtk.Window.__init__(self)
@@ -89,6 +92,17 @@
 
         container.put(self.file_name_box, 0, 0)
 
+        self.output_json = Gtk.Button.new_with_label("Output")
+        self.output_json.set_size_request(100, 40)
+        self.output_json.connect("clicked", self.output_json_clicked)
+
+        self.input_json = Gtk.Button.new_with_label("Import")
+        self.input_json.set_size_request(100, 40)
+        self.input_json.connect("clicked", self.input_json_clicked)
+
+        container.put(self.output_json, 210, 0)
+        container.put(self.input_json, 320, 0)
+
         self.show_all()
 
 
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index a932886..1892a88 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -12,13 +12,9 @@
     args = [
         "sift_training_data.h",
     ],
-    data = [
-        ":test_images/train_loading_bay_blue.png",
-        ":test_images/train_loading_bay_red.png",
-        ":test_images/train_power_port_blue.png",
-        ":test_images/train_power_port_red.png",
-        ":test_images/train_power_port_red_webcam.png",
-    ],
+    data = glob(["calib_files/*.txt"]) + glob([
+        "test_images/*.png",
+    ]),
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
@@ -64,9 +60,9 @@
         "sift_training_data_test.h",
         "test",
     ],
-    data = [
-        ":test_images/train_power_port_red.png",
-    ],
+    data = glob(["calib_files/*.txt"]) + glob([
+        "test_images/*.png",
+    ]),
     default_python_version = "PY3",
     main = "load_sift_training.py",
     srcs_version = "PY2AND3",
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt
new file mode 100644
index 0000000..8c1efbb
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt
@@ -0,0 +1 @@
+{"hostname": "pi-7971-3", "node_name": "pi3", "team_number": 7971, "timestamp": "Feb-13-2020-00-00-00", "camera_matrix": [[388.79947319784713, 0.0, 345.0976031055917], [0.0, 388.539148344188, 265.2780372766764], [0.0, 0.0, 1.0]], "dist_coeffs": [[0.13939139612079282, -0.24345067782097646, -0.0004228219772016648, -0.0004552350162154737, 0.08966339831250879]]}
diff --git a/y2020/vision/tools/python_code/calibrate_intrinsics.py b/y2020/vision/tools/python_code/calibrate_intrinsics.py
index df1b328..828a9a9 100644
--- a/y2020/vision/tools/python_code/calibrate_intrinsics.py
+++ b/y2020/vision/tools/python_code/calibrate_intrinsics.py
@@ -1,23 +1,63 @@
-import time
 import cv2
-import cv2.aruco as A
+import cv2.aruco
+import datetime
+import glog
+import json
+from json import JSONEncoder
 import numpy as np
+import os
+import time
 
-dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
-board = cv2.aruco.CharucoBoard_create(11, 8, .015, .011, dictionary)
-img = board.draw((200 * 11, 200 * 8))
+
+# From: https://pynative.com/python-serialize-numpy-ndarray-into-json/
+class NumpyArrayEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        else:
+            return super(NumpyArrayEncoder, self).default(obj)
+
+
+def get_robot_info(hostname):
+    hostname_split = hostname.split("-")
+    if hostname_split[0] != "pi":
+        print("ERROR: expected hostname to start with pi!")
+        quit()
+
+    team_number = int(hostname_split[1])
+    node_name = hostname_split[0] + hostname_split[2]
+    return node_name, team_number
+
+
+USE_LARGE_BOARD = False
+
+if USE_LARGE_BOARD:
+    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)
+    # Need to measure what the second size parameter is (markerLength)
+    board = cv2.aruco.CharucoBoard_create(12, 9, .06, .045, dictionary)
+    img = board.draw((200 * 12, 200 * 9))
+else:
+    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
+    board = cv2.aruco.CharucoBoard_create(11, 8, .015, .011, dictionary)
+    img = board.draw((200 * 11, 200 * 8))
 
 #Dump the calibration board to a file
 cv2.imwrite('charuco.png', img)
 
 #Start capturing images for calibration
-CAMERA_INDEX = 2
+CAMERA_INDEX = 0  # Capture from /dev/videoX, where X=CAMERA_INDEX
 cap = cv2.VideoCapture(CAMERA_INDEX)
 
 allCorners = []
 allIds = []
 capture_count = 0
-while (capture_count < 50):
+MIN_IMAGES = 50
+
+while (capture_count < MIN_IMAGES):
 
     ret, frame = cap.read()
     assert ret, "Unable to get image from the camera at /dev/video%d" % CAMERA_INDEX
@@ -25,45 +65,69 @@
     res = cv2.aruco.detectMarkers(gray, dictionary)
     aruco_detect_image = frame.copy()
 
-    if len(res[0]) > 0:
+    if len(res[0]) > 0 and len(res[1]) > 0:
         cv2.aruco.drawDetectedMarkers(aruco_detect_image, res[0], res[1])
 
+    # Display every image to let user trigger capture
     cv2.imshow('frame', aruco_detect_image)
     keystroke = cv2.waitKey(1)
+
     if keystroke & 0xFF == ord('q'):
         break
     elif keystroke & 0xFF == ord('c'):
-        print("Res:", len(res[0]), res[1].shape)
+        glog.info("Asked to capture image")
+        if len(res[0]) == 0 or len(res[1]) == 0:
+            # Can't use this image
+            continue
+
         res2 = cv2.aruco.interpolateCornersCharuco(res[0], res[1], gray, board)
         if res2[1] is not None and res2[2] is not None and len(res2[1]) > 3:
             capture_count += 1
             charuco_detect_image = frame.copy()
             allCorners.append(res2[1])
             allIds.append(res2[2])
-            print("Res2: ", res2[1].shape, res2[2].shape)
+            glog.info("Capturing image #%d" % capture_count)
             cv2.aruco.drawDetectedCornersCharuco(charuco_detect_image, res2[1],
                                                  res2[2])
+
             cv2.imshow('frame', charuco_detect_image)
             cv2.waitKey(1000)
-            # TODO: Should log image to disk
-            print("Captured image #", capture_count)
-
-imsize = gray.shape
+            # TODO<Jim>: Should log image to disk
 
 #Calibration fails for lots of reasons. Release the video if we do
 try:
+    imsize = gray.shape
     cal = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, imsize,
                                            None, None)
-    #print("Calibration is:\n", cal)
-    print("Reproduction error:", cal[0])
+    glog.info("Calibration is:\n%s", cal)
+    glog.info("Reproduction error: %s", cal[0])
     if (cal[0] > 1.0):
-        print("REPRODUCTION ERROR NOT GOOD")
-    # TODO<jim>: Need to save these out in format that can be used elsewhere
-    print("Calibration matrix:\n", cal[1])
-    print("Distortion Coefficients:\n", cal[2])
+        glog.error("REPRODUCTION ERROR NOT GOOD")
+    glog.info("Calibration matrix:\n%s", cal[1])
+    glog.info("Distortion Coefficients:\n%s", cal[2])
 except:
-    print("Calibration failed")
+    glog.error("Calibration failed")
     cap.release()
+    quit()
+
+hostname = os.uname()[1]
+date_str = datetime.datetime.today().strftime("%Y-%m-%d-%H-%M-%S")
+node_name, team_number = get_robot_info(hostname)
+numpyData = {
+    "hostname": hostname,
+    "node_name": node_name,
+    "team_number": team_number,
+    "timestamp": date_str,
+    "camera_matrix": cal[1],
+    "dist_coeffs": cal[2]
+}
+encodedNumpyData = json.dumps(
+    numpyData, cls=NumpyArrayEncoder)  # serialize dict (incl. ndarrays) to a JSON string
+
+# Write out the data
+calib_file = open("cam_calib_%s_%s.txt" % (hostname, date_str), "w")
+calib_file.write(encodedNumpyData)
+calib_file.close()
 
 cap.release()
 cv2.destroyAllWindows()
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 0667b16..4d8929a 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -1,6 +1,20 @@
 import copy
+import glog
+import json
 import math
 import numpy as np
+import os
+
+glog.setLevel("WARN")
+USE_BAZEL = True
+
+
+def bazel_name_fix(filename):
+    ret_name = filename
+    if USE_BAZEL:
+        ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
+
+    return ret_name
 
 
 class CameraIntrinsics:
@@ -53,9 +67,41 @@
 
 camera_list = []
 
+# TODO<Jim>: Should probably make this a dict to make replacing easier
 for team_number in (971, 7971, 8971, 9971):
-    for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4"):
+    for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
         camera_base = copy.deepcopy(web_cam_params)
         camera_base.node_name = node_name
         camera_base.team_number = team_number
         camera_list.append(camera_base)
+
+dir_name = ('calib_files')
+
+if USE_BAZEL:
+    from bazel_tools.tools.python.runfiles import runfiles
+    r = runfiles.Create()
+    dir_name = r.Rlocation(bazel_name_fix('calib_files'))
+
+for filename in os.listdir(dir_name):
+    if "cam-calib-int" in filename and filename.endswith(".txt"):
+        # Extract intrinsics from file
+        fn_split = filename.split("_")
+        hostname_split = fn_split[1].split("-")
+        if hostname_split[0] == "pi":
+            team_number = int(hostname_split[1])
+            node_name = hostname_split[0] + hostname_split[2]
+
+        calib_file = open(dir_name + "/" + filename, 'r')
+        calib_dict = json.loads(calib_file.read())
+        hostname = np.asarray(calib_dict["hostname"])
+        camera_matrix = np.asarray(calib_dict["camera_matrix"])
+        dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
+
+        # Look for match, and replace camera_intrinsics
+        for camera_calib in camera_list:
+            if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
+                glog.info("Found calib for %s, team #%d" % (node_name,
+                                                            team_number))
+                camera_calib.camera_int.camera_matrix = copy.copy(
+                    camera_matrix)
+                camera_calib.camera_int.dist_coeffs = copy.copy(dist_coeffs)
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index fb2f03a..430e83f 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -126,7 +126,7 @@
     ###
 
     # Create the reference "ideal" image
-    ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
+    ideal_power_port_red = TargetData('test_images/ideal_power_port_red.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
@@ -188,7 +188,7 @@
     # and entering the pixel values from the target center for each image.
     # These are currently only used for visualization of the target
     ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(
-        -1, 1, 2)  # train_power_port_red.png
+        -1, 1, 2)  # ideal_power_port_red.png
     # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
     # Add the ideal 3D target to our list
@@ -196,6 +196,7 @@
     # And add the training image we'll actually use to the training list
     training_target_power_port_red = TargetData(
         'test_images/train_power_port_red_webcam.png')
+    #'test_images/train_power_port_red_pi-7971-3.png')
     training_target_power_port_red.target_rotation = ideal_power_port_red.target_rotation
     training_target_power_port_red.target_position = ideal_power_port_red.target_position
     training_target_power_port_red.target_radius = target_radius_default
@@ -206,7 +207,7 @@
     ### Red Loading Bay
     ###
 
-    ideal_loading_bay_red = TargetData('test_images/train_loading_bay_red.png')
+    ideal_loading_bay_red = TargetData('test_images/ideal_loading_bay_red.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
@@ -233,7 +234,7 @@
         loading_bay_height / 2.
     ])
     ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(
-        -1, 1, 2)  # train_loading_bay_red.png
+        -1, 1, 2)  # ideal_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
     training_target_loading_bay_red = TargetData(
@@ -247,7 +248,7 @@
     ### Blue Power Port
     ###
 
-    ideal_power_port_blue = TargetData('test_images/train_power_port_blue.png')
+    ideal_power_port_blue = TargetData('test_images/ideal_power_port_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
@@ -299,7 +300,7 @@
         power_port_target_height
     ])
     ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(
-        -1, 1, 2)  # train_power_port_blue.png
+        -1, 1, 2)  # ideal_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
@@ -314,7 +315,7 @@
     ###
 
     ideal_loading_bay_blue = TargetData(
-        'test_images/train_loading_bay_blue.png')
+        'test_images/ideal_loading_bay_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
@@ -343,7 +344,7 @@
         loading_bay_height / 2.
     ])
     ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(
-        -1, 1, 2)  # train_loading_bay_blue.png
+        -1, 1, 2)  # ideal_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
     training_target_loading_bay_blue = TargetData(
diff --git a/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png
new file mode 100644
index 0000000..c3c0aea
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png
new file mode 100644
index 0000000..42091a6
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png b/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png
new file mode 100644
index 0000000..a3a7597
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png b/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png
new file mode 100644
index 0000000..9d2f0bf
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png
Binary files differ
diff --git a/y2020/vision/viewer_replay.cc b/y2020/vision/viewer_replay.cc
index c81796a..82ab11a 100644
--- a/y2020/vision/viewer_replay.cc
+++ b/y2020/vision/viewer_replay.cc
@@ -11,6 +11,8 @@
 DEFINE_string(config, "y2020/config.json", "Path to the config file to use.");
 DEFINE_string(logfile, "", "Path to the log file to use.");
 DEFINE_string(node, "pi1", "Node name to replay.");
+DEFINE_string(image_save_prefix, "/tmp/img",
+              "Prefix to use for saving images from the logfile.");
 
 namespace frc971 {
 namespace vision {
@@ -31,7 +33,8 @@
   std::unique_ptr<aos::EventLoop> event_loop =
       reader.event_loop_factory()->MakeEventLoop("player", node);
 
-  event_loop->MakeWatcher("/camera", [](const CameraImage &image) {
+  int image_count = 0;
+  event_loop->MakeWatcher("/camera", [&image_count](const CameraImage &image) {
     cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
     CHECK(image_mat.isContinuous());
     const int number_pixels = image.rows() * image.cols();
@@ -41,6 +44,10 @@
     }
 
     cv::imshow("Display", image_mat);
+    if (!FLAGS_image_save_prefix.empty()) {
+      cv::imwrite(FLAGS_image_save_prefix + std::to_string(image_count++) + ".png",
+                  image_mat);
+    }
     cv::waitKey(1);
   });